diff --git "a/1230.jsonl" "b/1230.jsonl" new file mode 100644--- /dev/null +++ "b/1230.jsonl" @@ -0,0 +1,394 @@ +{"seq_id": "399474414", "text": "from scrapy import Spider, Request\n\n\n\nclass QuotesSpider(Spider):\n name = 'Quotes'\n allowed_domains = ['quotes.toscrape.com/']\n start_urls = ['https://quotes.toscrape.com/']\n\n\n def parse(self, response):\n # # Basic spider\n # title = response.xpath('//h1/a/text()').extract()\n # top_ten_tags = response.xpath('//*[@class=\"tag-item\"]/a/text()').extract()\n #\n # yield {'Title': title, 'Top Ten Tags': top_ten_tags}\n\n\n # More advanced spider\n quotes = response.xpath('//div[@class=\"quote\"]')\n\n for quote in quotes:\n text = quote.xpath('./span[@class=\"text\"]/text()').extract_first()\n text = text.lstrip('\\“')\n text = text.rstrip('\\”')\n author = quote.xpath('./span[2]/small[@class=\"author\"]/text()').extract_first()\n tags = quote.xpath('./div[@class=\"tags\"]/a[@class=\"tag\"]/text()').extract()\n\n yield {'Text': text, 'Author': author, 'Tags': tags}\n\n relative_next_page_url = response.xpath('//li[@class=\"next\"]/a/@href').extract_first()\n absolute_next_page_url = response.urljoin(relative_next_page_url)\n # yield - Do not save value in memory - better memory efficiency\n yield Request(url=absolute_next_page_url, callback=self.parse)\n", "sub_path": "Quotes_Spider/Quotes_Spider/spiders/Quotes.py", "file_name": "Quotes.py", "file_ext": "py", "file_size_in_byte": 1297, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "377971193", "text": "from rest_framework_simplejwt.tokens import RefreshToken\nfrom django.utils.six import text_type\n\n\ndef get_simplejwt_tokens(user):\n \"\"\"This foucntion get a User object and return 'access' and 'refresh' tokens.\"\"\"\n\n tokens = RefreshToken.for_user(user)\n refresh = text_type(tokens)\n access = text_type(tokens.access_token)\n data = {\n \"refresh\": refresh,\n \"access\": access\n }\n\n return data\n", "sub_path": "authentication/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 422, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "rest_framework_simplejwt.tokens.RefreshToken.for_user", "line_number": 8, "usage_type": "call"}, {"api_name": "rest_framework_simplejwt.tokens.RefreshToken", "line_number": 8, "usage_type": "name"}, {"api_name": "django.utils.six.text_type", "line_number": 9, "usage_type": "call"}, {"api_name": "django.utils.six.text_type", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "562517682", "text": "from flask import Blueprint, render_template\nfrom pylxd.client import Client\n\napp = Blueprint('Blueprint for networks infomation', __name__, url_prefix='/networks')\n\n@app.route('/')\ndef networks():\n client = Client()\n return render_template('networks.list.html',\n names = sorted([net.name for net in client.networks.all()]));\n\n@app.route('/details/')\ndef details(name):\n client = Client()\n\n network = client.networks.get(name)\n\n return render_template('networks.details.html', network = network)\n\n\n", "sub_path": "lxd-console/views/networks.py", "file_name": "networks.py", "file_ext": "py", "file_size_in_byte": 545, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": 
[{"api_name": "flask.Blueprint", "line_number": 4, "usage_type": "call"}, {"api_name": "pylxd.client.Client", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 9, "usage_type": "call"}, {"api_name": "pylxd.client.Client", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "400143931", "text": "import asyncio\nfrom lsst.ts import salobj\n\n\nasync def main():\n while True:\n try:\n print(\"Starting GenericCamera Simulator\")\n r = salobj.Remote(salobj.Domain(), \"GenericCamera\", index=1)\n await r.start_task\n await salobj.set_summary_state(r, salobj.State.ENABLED)\n await r.cmd_startLiveView.set_start(expTime=0.5)\n break\n except Exception as e:\n print(\"Error starting GenericCamera Simulator. Retrying in 1 second\")\n await asyncio.sleep(1)\n\n\nif __name__ == \"__main__\":\n loop = asyncio.get_event_loop()\n loop.create_task(main())\n loop.run_forever()\n", "sub_path": "simulator/gencam/simulator.py", "file_name": "simulator.py", "file_ext": "py", "file_size_in_byte": 661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "lsst.ts.salobj.Remote", "line_number": 9, "usage_type": "call"}, {"api_name": "lsst.ts.salobj", "line_number": 9, "usage_type": "name"}, {"api_name": "lsst.ts.salobj.Domain", "line_number": 9, "usage_type": "call"}, {"api_name": "lsst.ts.salobj.set_summary_state", "line_number": 11, "usage_type": "call"}, {"api_name": "lsst.ts.salobj", "line_number": 11, "usage_type": "name"}, {"api_name": "lsst.ts.salobj.State", "line_number": 11, "usage_type": "attribute"}, {"api_name": "asyncio.sleep", "line_number": 16, "usage_type": "call"}, {"api_name": "asyncio.get_event_loop", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "442121152", "text": "from django.urls import path\nfrom . 
import views\nfrom django.conf.urls import url\nfrom .views import about, age_table, index, information, login, population, privacy, region, register\n\nurlpatterns = [\n path('', index, name=\"index\"),\n path('login/', login, name=\"login\"),\n path('register/', register, name=\"register\"),\n path('privacy/', privacy, name=\"privacy\"),\n path('about/', about, name=\"about\"),\n path('region/', region, name=\"region\"),\n path('population/', population, name=\"population\"),\n path('age_table/', age_table, name=\"age_table\"),\n path('information/', information, name=\"information\"),\n]", "sub_path": "django_project/application/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 628, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "views.index", "line_number": 7, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "views.login", "line_number": 8, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "views.register", "line_number": 9, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "views.privacy", "line_number": 10, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "views.about", "line_number": 11, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "views.region", "line_number": 12, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "views.population", "line_number": 13, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": "call"}, {"api_name": "views.age_table", "line_number": 14, "usage_type": "argument"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "views.information", "line_number": 15, "usage_type": "argument"}]} +{"seq_id": "383507389", "text": "#!/usr/bin/env python\n# coding=utf-8\n\nfrom __future__ import print_function\nfrom __future__ import division\n\n# 时间日期格式化符号\n# %y 两位数的年份表示 00-99\n# %Y 四位数的年份表示 000-9999\n# %m 月份 01-12\n# %d 月内中的一天 0-31\n# %H 24小时制小时数 0-23\n# %I 12小时制小时数 01-12\n# %M 分钟数 00-59\n# %S 秒 00-59\n# %a 本地简化星期名称\n# %A 本地完整星期名称\n# %b 本地简化的月份名称\n# %B 本地完整的月份名称\n# %c 本地相应的日期表示和时间表示\n# %j 年内的一天 001-366\n# %p 本地A.M.或P.M.的等价符\n# %U 一年中的星期数 00-53 星期天为星期的开始\n# %w 星期 0-6 星期天为星期的开始\n# %W 一年中的星期数 00-53 星期一为星期的开始\n# %x 本地相应的日期表示\n# %X 本地相应的时间表示\n# %Z 当前时区的名称\n\nimport time\n \ncurtime = time.time() # 1970纪元后经过的浮点秒数\nprint(curtime) # 1538396760.63\ntime_str = time.ctime(curtime) # 转为string格式\nprint(time_str) # => Fri May 5 18:28:08 2017\n\ntime_tup = time.localtime(curtime) # 转为struct_time(tuple)格式\nprint(time_tup)\n\nprint(time.altzone) # 夏令时与UTC的差值 -32400\nprint(time.timezone) # 本地时间与UTC的差值 -28800, -28800/3600==8.0\nprint(28800/3600)\n\nprint('#############')\nimport datetime\n \ndatetime_dt = datetime.datetime.today() # 获取当前日期和时间\ndatetime_str = datetime_dt.strftime(\"%Y-%m-%d %H:%M:%S\") # 格式化日期时间\nprint(datetime_str)\n \ntime_delta = datetime.timedelta(hours=-3)\ndatetime_pre = datetime_dt + time_delta\n# print(datetime_pre.ctime())\nprint(datetime_pre.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\nprint('###########')\ndt1 = datetime.datetime(2018, 2, 
28)\ndelta = datetime.timedelta(days=1)\ndt2 = dt1 + delta\nprint(dt2.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\nprint('##########')\ndt1 = datetime.datetime(2018, 3, 28, 13, 5, 58)\ndelta = datetime.timedelta(seconds=2.1)\ndt2 = dt1 + delta\nprint(dt2.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n \n# # 将日期时间转为时间戳\n# time_s = datetime_dt.timestamp()\n# print(\"现在的时间戳: {}\".format(time_s))\n# date_time = datetime.datetime.strptime('Tue Jan 01 00:00:00 2008', '%a %b %d %H:%M:%S %Y') # 解析\n \n# # 实例方法\n# date = date_time.date() # 转为date => date(2017, 5, 6)\n# time = date_time.time() # 转为time => time(19, 10, 46, 149016)\n# time_s = date_time.timestamp() # 转为时间戳\n\n# num = time.hour\n# num = time.minute\n# mum = time.second\n# mum = time.microsecond\n \n# # replace([hour[, minute[, second[, microsecond[, tzinfo]]]]])\n# time_t = time.replace(hour=17) # 替换\n# time_s = time.isoformat() # 格式化 => '18:30:59'\n\nimport random\ndef gen_random_datetime(n):\n base_mark = datetime.datetime(2018, 2, 2, 0, 0, 0)\n datas = []\n for _ in range(n):\n random_sec = random.randint(3, 20)\n random_status = random.randint(0,1)\n delta = datetime.timedelta(seconds=random_sec)\n base_mark += delta\n base_mark_str = base_mark.strftime('%Y-%m-%d %H:%M:%S')\n datas.append((random_status, base_mark_str))\n return datas\n\nprint(gen_random_datetime(12))\n", "sub_path": "src/sql/date_time_lib.py", "file_name": "date_time_lib.py", "file_ext": "py", "file_size_in_byte": 3204, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.time", "line_number": 32, "usage_type": "call"}, {"api_name": "time.ctime", "line_number": 34, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 37, "usage_type": "call"}, {"api_name": "time.altzone", "line_number": 40, "usage_type": "attribute"}, {"api_name": "time.timezone", "line_number": 41, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 47, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 47, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 51, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 57, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 63, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 90, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 93, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 94, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "191538728", "text": "import controler.parser as script\nimport pytest\n\n\n@pytest.fixture\ndef query():\n return {\"message\": \"où se trouve la ville de dijon?\"}\n\n\ndef test_display_question_entered_by_the_user(query):\n 'Test function to check the return of of the parser class'\n\n question = script.Parser(query)\n assert question.show_question() == {'message':\n 'où se trouve la ville de dijon?'}\n\n\ndef test_to_lower_and_no_accents(query):\n 'Test function to pass a string to lower cases'\n\n question = script.Parser(query)\n assert question.to_lower_string() == 'ou se trouve la ville de dijon?'\n\n question = script.Parser({\"message\":\n 'OU SE TROUVE LA VILLE DE DIJON?'})\n assert question.to_lower_string() == 'ou se trouve la ville de 
dijon?'\n\n\ndef test_function_to_delete_stop_words(query):\n 'Test function to delete words present in a stop-word list'\n\n question = script.Parser(query)\n question.lowerStringQuestion = 'trouve ville dijon?'\n assert question.after_deleted_words() == 'trouve ville dijon?'\n\n\ndef test_extract_re_words_of_the_question(query):\n 'Test of the function that keeps only the last and usefull words'\n\n question = script.Parser(query)\n question.lowerStringQuestion = 'trouve ville dijon?'\n result = question.extract_question()\n assert result == ['ville', 'dijon?']\n", "sub_path": "tests/test_parser.py", "file_name": "test_parser.py", "file_ext": "py", "file_size_in_byte": 1383, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pytest.fixture", "line_number": 5, "usage_type": "attribute"}, {"api_name": "controler.parser.Parser", "line_number": 13, "usage_type": "call"}, {"api_name": "controler.parser", "line_number": 13, "usage_type": "name"}, {"api_name": "controler.parser.Parser", "line_number": 21, "usage_type": "call"}, {"api_name": "controler.parser", "line_number": 21, "usage_type": "name"}, {"api_name": "controler.parser.Parser", "line_number": 24, "usage_type": "call"}, {"api_name": "controler.parser", "line_number": 24, "usage_type": "name"}, {"api_name": "controler.parser.Parser", "line_number": 32, "usage_type": "call"}, {"api_name": "controler.parser", "line_number": 32, "usage_type": "name"}, {"api_name": "controler.parser.Parser", "line_number": 40, "usage_type": "call"}, {"api_name": "controler.parser", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "512128967", "text": "import requests\nimport time\nimport random\nfrom lxml import etree\n\nclass MaoyanSpider(object):\n def __init__(self):\n self.baseurl = 'https://maoyan.com/board/4?offset={}'\n self.headers = {\n 'User-Agent': 'User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50'\n }\n # 添加计数\n self.page = 1\n\n def get_page(self,url):\n res = requests.get(url,headers = self.headers)\n res.encoding = 'utf-8'\n html = res.text\n # print(html)\n self.parse_page(html)\n\n\n def parse_page(self,html):\n # 创建解析对象\n parse_html = etree.HTML(html)\n # 获取基准的xpath表达式,获取dd节点对象列表\n dd_list = parse_html.xpath('//*[@id=\"app\"]/div/div/div[1]/dl/dd')\n # for遍历,依次获取每个电影dd信息\n for dd in dd_list:\n # 名称\n name = dd.xpath('./div/div/div[1]/p[1]/a/text()')\n if name:\n name[0].strip()\n else:\n name = 'null'\n # 主演\n star = dd.xpath('./div/div/div[1]/p[2]/text()')[0].strip()\n # 时间\n time = dd.xpath('./div/div/div[1]/p[3]/text()')[0].strip()[5:15]\n print({\n '电影名称':name,\n \"主演\":star,\n \"时间\":time,\n })\n\n\n def main(self):\n for offset in range(0,21,10):\n url = self.baseurl.format(str(offset))\n self.get_page(url)\n print('第%s页完成' % self.page)\n self.page +=1\n time.sleep(random.randint(1,3))\n\n\n\n\nif __name__ == '__main__':\n spider = MaoyanSpider()\n spider.main()", "sub_path": "day03/02_maoyan_xpath.py", "file_name": "02_maoyan_xpath.py", "file_ext": "py", "file_size_in_byte": 1768, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 25, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": 
"random.randint", "line_number": 53, "usage_type": "call"}]} +{"seq_id": "476676174", "text": "# -*- coding:utf8 -*-\n__author__ = 'Zovven'\n\nimport mistune\n\n\nclass TocMixin(object):\n \"\"\"TOC mixin for Renderer, mix this with Renderer::\n class TocRenderer(TocMixin, Renderer):\n pass\n toc = TocRenderer()\n md = mistune.Markdown(renderer=toc)\n # required in this order\n toc.reset_toc() # initial the status\n md.parse(text) # parse for headers\n toc.render_toc(level=3) # render TOC HTML\n \"\"\"\n\n def reset_toc(self):\n self.toc_tree = []\n self.toc_count = 0\n\n def header(self, text, level, raw=None):\n rv = '%s\\n' % (\n level, self.toc_count, text, level\n )\n self.toc_tree.append((self.toc_count, text, level, raw))\n self.toc_count += 1\n return rv\n\n def render_toc(self):\n \"\"\"Render TOC to HTML.\n :param level: render toc to the given level\n \"\"\"\n return ''.join(self._iter_toc())\n\n def _iter_toc(self):\n last_level = None\n yield '\\n'\n\n\nclass TocRenderer(TocMixin, mistune.Renderer):\n pass\n", "sub_path": "app/tocutil.py", "file_name": "tocutil.py", "file_ext": "py", "file_size_in_byte": 2280, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "mistune.Renderer", "line_number": 71, "usage_type": "attribute"}]} +{"seq_id": "574377819", "text": "import re\n\nfrom bs4 import BeautifulSoup\n\nfrom common import get_session, itemschema_ldjson, message\n\npat = re.compile(r'^(https?://(www|get|cyber)\\.lider\\.cl/(catalogo|supermercado)/product/[\\w\\-]+/([0-9]+)/?)')\napi_base = 'https://buysmart-landing-bff-production.lider.cl/buysmart-checkout-bff/products/' \\\n '?sku=SKU&appId=BuySmart&ts=1558232563101'\nimg_base = 'https://images.lider.cl/wmtcl?source=url[file:/productos/SKUIMAGE]&sink'\n\n\ndef parse(url):\n url_items = list(pat.findall(url)[0])\n\n if url_items[2] == 'catalogo':\n with get_session() as s:\n data = s.get(api_base.replace('SKU', url_items[3])).json()\n if not len(data):\n return message(code='product_not_found0')\n\n data = data[0]\n name = data['displayName']\n price = data['price']['BasePriceReference']\n price_sale = data['price']['BasePriceSales'] or price\n price_card = data['price']['BasePriceTLMC'] or price_sale\n image = img_base.replace('SKU', url_items[3]).replace('IMAGE', data['imagesAvailables'][0])\n else:\n with get_session() as s:\n resp = s.get(url)\n\n data_html = resp.text.replace('/*', '').replace('*/', '')\n dom = BeautifulSoup(data_html, 'html.parser')\n data = itemschema_ldjson(dom)\n name = data['name'] + ' ' + data['brand']\n price = int(data['offers']['highPrice'])\n price_sale = int(data['offers'].get('lowPrice', price))\n price_card = price_sale\n image = data['image']\n\n return dict(\n url=url,\n name=name,\n price=price,\n price_sale=price_sale,\n price_card=price_card,\n image=image,\n raw=data\n )\n", "sub_path": "stores/lider.py", "file_name": "lider.py", "file_ext": "py", "file_size_in_byte": 1705, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "re.compile", "line_number": 7, "usage_type": "call"}, {"api_name": "common.get_session", "line_number": 17, "usage_type": "call"}, {"api_name": "common.message", "line_number": 20, "usage_type": "call"}, {"api_name": "common.get_session", "line_number": 29, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 33, "usage_type": "call"}, {"api_name": "common.itemschema_ldjson", "line_number": 34, 
"usage_type": "call"}]} +{"seq_id": "5898631", "text": "import datetime\nn ='rec__' +str(datetime.datetime.now())\nprint(n[:10])\na = n.replace('-','_')\nprint(a)\nfrom pymongo import MongoClient\n#url = 'mongodb+srv://harkishen:Bbsr131@cluster0-zmd3i.mongodb.net/test?retryWrites=true'\nurl = 'mongodb://127.0.0.1:27017'\ndbn = 'test'\nclient = MongoClient(url)\ndb = client[dbn]\ncoll = db['these']\nobj = {\n\t'name':'harkishen singh',\n\t'branch':'computer science and engineering',\n\t'college':'college of engineering and technology',\n\t'place':'bhubaneswar',\n\t'native':'amritsar, punjab',\n\t'country':'india'\n}\ncoll.insert_one(obj)\nprint('Inserted !')", "sub_path": "tests.py", "file_name": "tests.py", "file_ext": "py", "file_size_in_byte": 582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 2, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 2, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "18390266", "text": "#!/usr/bin/env python\n\nimport argparse as ap\nimport os\nimport time\nfrom spt3g import core\nfrom spt3g.cluster.condor_tools import condor_submit\n\nuser = os.getenv('LOGNAME')\ndefault_jobname = 'condor_{}'.format(time.strftime('%Y%m%d_%H%M%S'))\ndefault_log_root = os.path.join('/scratch', user)\ndefault_output_root = os.path.join('/spt/user', user)\n\ndefault_grid_proxy = os.getenv('X509_USER_PROXY')\n\nP = ap.ArgumentParser(\n description='Submit job to Condor queue on scott/amundsen',\n formatter_class=ap.ArgumentDefaultsHelpFormatter)\nP.add_argument('script', help='User script to run')\nP.add_argument('--args', nargs='+', default=[],\n help='Arguments passed to the user script')\nP.add_argument('--caller', default='python',\n help='Program that calls the user script')\nP.add_argument('--jobname', default=default_jobname,\n help='Name of the submit job. This is used to create the log and '\n 'output directories, and to tag the input software.')\nP.add_argument('--log-root', default=default_log_root,\n help='Local directory where the log files are stored. '\n 'This should be a location on /scratch, since these are typically '\n 'lots of small files')\nP.add_argument('--output-root', default=default_output_root,\n help='Local directory where the output files are stored. '\n 'This should be a location on cephfs (/spt), since output files '\n 'must be transfered using GridFTP')\nP.add_argument('--input-files', nargs='+', default=[],\n help='Files to transfer into the job using GridFTP. '\n 'Paths must be absolute paths on the cephfs file system (/spt).')\nP.add_argument('--output-files', nargs='+', default=[],\n help='Files created by the input command that are to be '\n 'transfered back to the `output_root` using GridFTP. Paths must be '\n 'relative to the remote working directory where the command is run.')\nP.add_argument('--grid-proxy', default=default_grid_proxy,\n help='Path to a valid grid proxy file. Required if any '\n 'input or output files are supplied.')\nP.add_argument('--aux-input-files', nargs='+', default=[],\n help='Small files to transfer with the submit script. '\n 'The command script and grid proxy are automatically '\n 'added to this list.')\nP.add_argument('--aux-output-files', nargs='+', default=[],\n help='Small files to transfer to `log_root` on job completion. 
'\n 'Paths must be relative to the remote working directory.')\nP.add_argument('--queue', default='1',\n help='The job queue specification, either an integer '\n 'number of job copies or a specification string -- see '\n 'Condor documentation.')\nP.add_argument('--requirements', default='((OSGVO_OS_STRING == \"RHEL 6\") && '\n '(GLIDEIN_ResourceName =!= \"NPX\"))',\n help='Computing requirements specification')\nP.add_argument('--request-cpus', type=int, default=1,\n help='Number of CPUs to request for the job')\nP.add_argument('--request-memory', type=int, default=2,\n help='Memory required, in GB')\nP.add_argument('--request-disk', type=int, default=1,\n help='Disk space required, in GB')\nP.add_argument('--no-spt3g-env', dest='spt3g_env', action='store_false',\n default=True, help='Do not use the spt3g_software code '\n 'and environment when running the job.')\nP.add_argument('--when-to-transfer-output', default='ON_EXIT',\n help='When the `aux_output_files` are to be transfered '\n 'back to the local `log_root`.')\nP.add_argument('--python3', default=False, action='store_true',\n help='Use the python3 CVMFS environment, rather than python2.')\nP.add_argument('--user-code', default='',\n help='User code to add to the run script')\nP.add_argument('--create-only', default=False, action='store_true',\n help='Create the submit and shell scripts for running the job, '\n 'but do not submit them. Useful for debugging.')\n\nargs = P.parse_args()\n\nargs.request_memory *= core.G3Units.GB\nargs.request_disk *= core.G3Units.GB\n\ncondor_submit(**vars(args))\n", "sub_path": "cluster/bin/spt3g-condor-submit.py", "file_name": "spt3g-condor-submit.py", "file_ext": "py", "file_size_in_byte": 4367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 14, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 18, "usage_type": "attribute"}, {"api_name": "spt3g.core.G3Units", "line_number": 81, "usage_type": "attribute"}, {"api_name": "spt3g.core", "line_number": 81, "usage_type": "name"}, {"api_name": "spt3g.core.G3Units", "line_number": 82, "usage_type": "attribute"}, {"api_name": "spt3g.core", "line_number": 82, "usage_type": "name"}, {"api_name": "spt3g.cluster.condor_tools.condor_submit", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "491593015", "text": "from __future__ import print_function\n\nimport os\nimport tarfile\nimport tempfile\nimport shutil\n\nimport boto3\n\n\ndef _make_tarfile(output_filename, source_dir):\n \"\"\"\n create a tar.gz from a directory.\n \"\"\"\n with tarfile.open(output_filename, \"w:gz\") as tar:\n for f in os.listdir(source_dir):\n tar.add(os.path.join(source_dir, f), arcname=f)\n\n\ndef _upload_s3(local_model_path, bucket, prefix):\n \"\"\"\n Upload dir to S3 as .tar.gz.\n :param local_model_path: local path to a dir.\n :param bucket: S3 bucket where to store the data.\n :param prefix: path within the bucket.\n :return:\n \"\"\"\n 
sess = boto3.Session()\n tmp_dir = tempfile.mkdtemp()\n try:\n model_data_file = os.path.join(tmp_dir, \"model.tar.gz\")\n _make_tarfile(model_data_file, local_model_path)\n s3 = boto3.client('s3')\n with open(model_data_file, 'rb') as fobj:\n key = os.path.join(prefix, 'model.tar.gz')\n obj = sess.resource('s3').Bucket(bucket).Object(key)\n obj.upload_fileobj(fobj)\n obj.Acl().put(ACL='public-read')\n response = s3.put_object_tagging(\n Bucket=bucket,\n Key=key,\n Tagging={'TagSet': [{'Key': 'SageMaker', 'Value': 'true'}, ]}\n )\n print('tag response', response)\n return '{}/{}/{}'.format(s3.meta.endpoint_url, bucket, key)\n finally:\n shutil.rmtree(tmp_dir)\n\n\ndef _deploy(role, container_name, app_name, model_s3_path, run_id):\n \"\"\"\n Deploy model on sagemaker.\n :param role:\n :param container_name:\n :param app_name:\n :param model_s3_path:\n :param run_id:\n :return:\n \"\"\"\n sage_client = boto3.client('sagemaker', region_name=\"us-west-2\")\n ecr_client = boto3.client(\"ecr\")\n repository_conf = ecr_client.describe_repositories(\n repositoryNames=[container_name])['repositories'][0]\n model_name = app_name + '-model'\n model_response = sage_client.create_model(\n ModelName=model_name,\n PrimaryContainer={\n 'ContainerHostname': 'mlflow-serve-%s' % model_name,\n 'Image': repository_conf[\"repositoryUri\"],\n 'ModelDataUrl': model_s3_path,\n 'Environment': {},\n },\n ExecutionRoleArn=role,\n # sagemaker.get_execution_role(), # for accessing model artifacts &\n # docker image. it was made with AmazonSageMakerFullAccess policy. the\n # model object in S3 is tagged with SageMaker=true, which means this role\n # can access it (per the policy).\n Tags=[{'Key': 'run_id', 'Value': str(run_id)}, ],\n )\n print(\"model_arn: %s\" % model_response[\"ModelArn\"])\n config_name = app_name + \"-config\"\n endpoint_config_response = sage_client.create_endpoint_config(\n EndpointConfigName=config_name,\n ProductionVariants=[\n {\n 'VariantName': 'model1',\n 'ModelName': model_name, # is this the unique identifier for Model?\n 'InitialInstanceCount': 1,\n 'InstanceType': 'ml.m4.xlarge',\n 'InitialVariantWeight': 1,\n },\n ],\n Tags=[\n {\n 'Key': 'app_name',\n 'Value': app_name,\n },\n ],\n )\n print(\"endpoint_config_arn: %s\" % endpoint_config_response[\"EndpointConfigArn\"])\n endpoint_response = sage_client.create_endpoint(\n EndpointName=app_name,\n EndpointConfigName=config_name,\n Tags=[],\n )\n print(\"endpoint_arn: %s\" % endpoint_response[\"EndpointArn\"])\n", "sub_path": "mlflow/sagemaker/deploy.py", "file_name": "deploy.py", "file_ext": "py", "file_size_in_byte": 3608, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tarfile.open", "line_number": 15, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "boto3.Session", "line_number": 28, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "boto3.client", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path", "line_number": 35, "usage_type": "attribute"}, {"api_name": "shutil.rmtree", 
"line_number": 47, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 60, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "183527708", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 14 14:44:37 2021\r\n\r\n@author: aksha\r\n\"\"\"\r\n\r\n#XGBoost tuning\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 14 12:16:43 2021\r\n\r\n@author: aksha\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport seaborn as sns\r\nfrom sklearn import preprocessing\r\nfrom sklearn import metrics\r\nimport xgboost as xgb\r\nfrom sklearn.metrics import classification_report, confusion_matrix\r\n\r\ndf = pd.read_csv(\"train.csv\", index_col=(8));\r\n\r\ncity = df.pop('city')\r\n\r\nprint(\"NaN values in the dataset:\\n\" , df.isna().sum() )\r\n\r\nsns.pairplot(df[['estimated blast radius(km)' , 'temperature(C)', 'distance from nearest carrier(nm)', 'military-to-civilian ratio', 'population density', 'relative humidity', 'squadron strength' ]], diag_kind='kde')\r\nprint(df.describe().transpose()[['mean', 'std']])\r\n\r\nfeatures = np.array(df)\r\nlabels = np.array(city)\r\n\r\nfor i in range(len(labels)):\r\n if labels[i] == 7:\r\n labels[i] = 0\r\n \r\n \r\n\r\n\r\ndtrain = xgb.DMatrix(data=features, label=labels)\r\n\r\n\r\nparams = {\r\n 'booster': 'dart',\r\n 'max_depth': 5,\r\n 'objective': 'multi:softmax',\r\n 'num_class': 7,\r\n 'n_gpus': 0,\r\n 'subsample': 0.8,\r\n 'lambda': 1,\r\n 'sample_type': 'uniform',\r\n 'rate_drop': 0,\r\n 'normalize_type': 'forest',\r\n 'gamma': 0,\r\n 'min_child_weight': 0.5\r\n}\r\nbst = xgb.train(params, dtrain)\r\n\r\n\r\ndf = pd.read_csv(\"test.csv\")\r\nindex = df.pop(\"Id\")\r\ntrain_set = np.array(df)\r\nindex = np.array(index)\r\n\r\n\r\n\r\ndtest = xgb.DMatrix(data=train_set)\r\n\r\n\r\npred_train = bst.predict(dtrain)\r\n\r\nprint(classification_report(labels, pred_train))\r\n\r\npred = bst.predict(dtest, ntree_limit=15)\r\n\r\ndf = pd.read_csv(\"test.csv\")\r\nindex = df.pop(\"Id\")\r\n\r\nfor i in range(len(pred)):\r\n if pred[i] == 0:\r\n pred[i] = 7\r\n\r\ntrain_set = np.array(df)\r\nindex = np.array(index)\r\n\r\ny_hat = pred.astype(int)\r\nindex = index.astype(int)\r\n\r\noutput = np.concatenate((index.reshape(-1,1), y_hat.reshape(-1,1)), axis = 1)\r\n\r\n\r\noutdf= pd.DataFrame(output)\r\noutdf.columns = [\"Id\", \"city\"]\r\noutdf.to_csv(\"submit.csv\")\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n", "sub_path": "Day3/Tuning XGBoost.py", "file_name": "Tuning XGBoost.py", "file_ext": "py", "file_size_in_byte": 2097, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pandas.read_csv", "line_number": 25, "usage_type": "call"}, {"api_name": "seaborn.pairplot", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 35, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 44, "usage_type": "call"}, {"api_name": "xgboost.train", "line_number": 61, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 67, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 71, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 76, "usage_type": "call"}, 
{"api_name": "pandas.read_csv", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "539850017", "text": "#!/usr/bin/python3\n\"\"\"RZFeeser || Alta3 Research\nTracking student inventory within a sqliteDB accessed\nvia Flask APIs\"\"\"\n\n# standard library\nimport sqlite3 as sql\n\n# python3 -m pip install flask\nfrom flask import Flask\nfrom flask import render_template\nfrom flask import request\n\napp = Flask(__name__)\n\n# return home.html (landing page)\n@app.route('/')\ndef home():\n return render_template('home.html')\n\n# return student.html (a way to add a student to our sqliteDB)\n@app.route('/enternew')\ndef new_student():\n return render_template('student.html')\n\n# if someone uses student.html it will generate a POST\n# this post will be sent to /addrec\n# where the information will be added to the sqliteDB\n@app.route('/addrec',methods = ['POST'])\ndef addrec():\n try:\n nm = request.form['nm'] # student name\n addr = request.form['addr'] # student street address\n city = request.form['city'] # student city\n pin = request.form['pin'] # \"pin\" assigned to student\n # (\"pin\" is just an example of meta data we want to track)\n\n # connect to sqliteDB\n with sql.connect(\"database.db\") as con:\n cur = con.cursor()\n\n # place the info from our form into the sqliteDB\n cur.execute(\"INSERT INTO students (name,addr,city,pin) VALUES (?,?,?,?)\",(nm,addr,city,pin) )\n # commit the transaction to our sqliteDB\n con.commit()\n # if we have made it this far, the record was successfully added to the DB\n msg = \"Record successfully added\"\n \n except:\n con.rollback() # this is the opposite of a commit()\n msg = \"error in insert operation\" # we were NOT successful\n\n finally:\n con.close() # successful or not, close the connection to sqliteDB\n return render_template(\"result.html\",msg = msg) #\n\n@app.route('/removerec')\ndef delete_student():\n return render_template('remove.html')\n\n@app.route('/remove',methods = ['DELETE'])\ndef deleterec():\n try:\n nm = request.form['nm'] # student name\n \n # connect to sqliteDB\n with sql.connect(\"database.db\") as con:\n cur = con.cursor()\n\n # place the info from our form into the sqliteDB\n cur.execute(\"DELETE FROM students WHERE (name) VALUES (?)\",(nm) )\n # commit the transaction to our sqliteDB\n con.commit()\n # if we have made it this far, the record was successfully added to the DB\n msg = \"Record successfully deleted\"\n\n except:\n con.rollback() # this is the opposite of a commit()\n msg = \"error in delete operation\" # we were NOT successful\n\n finally:\n con.close() # successful or not, close the connection to sqliteDB\n return render_template(\"list.html\",msg = msg)\n\n# return all entries from our sqliteDB as HTML\n@app.route('/list')\ndef list_students():\n con = sql.connect(\"database.db\")\n con.row_factory = sql.Row\n \n cur = con.cursor()\n cur.execute(\"SELECT * from students\") # pull all information from the table \"students\"\n \n rows = cur.fetchall()\n return render_template(\"list.html\",rows = rows) # return all of the sqliteDB info as HTML\n\nif __name__ == '__main__':\n try:\n # ensure the sqliteDB is created\n con = sql.connect('database.db')\n print(\"Opened database successfully\")\n # ensure that the 
table students is ready to be written to\n con.execute('CREATE TABLE IF NOT EXISTS students (name TEXT, addr TEXT, city TEXT, pin TEXT)')\n print(\"Table created successfully\")\n con.close()\n # begin Flask Application \n app.run(host=\"0.0.0.0\", port=2224, debug = True)\n except:\n print(\"App failed on boot\")\n\n", "sub_path": "myapp/server01.py", "file_name": "server01.py", "file_ext": "py", "file_size_in_byte": 3811, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 32, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 33, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 34, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 34, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 35, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 35, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 55, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 64, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 64, "usage_type": "name"}, {"api_name": "sqlite3.connect", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 83, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 88, "usage_type": "call"}, {"api_name": "sqlite3.Row", "line_number": 89, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 95, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "63810635", "text": "from __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nimport datetime # For datetime objects\n\n# Import the backtrader platform\nimport backtrader as bt\nfrom customCSV import customCSVminutes, customCSVdays\n\n\nclass TestStrategy(bt.Strategy):\n\n def log(self, txt, dt=None):\n dt = dt or self.datas[0].datetime.date(0)\n print('%s, %s' % (dt.isoformat(), txt))\n\n def __init__(self):\n self.dataclose = self.datas[0].close\n\n def next(self):\n # print('here')\n self.log('Close, %.2f' % self.dataclose[0])\n\n\nif __name__ == '__main__':\n\n cerebro = bt.Cerebro()\n\n # Add a strategy\n cerebro.addstrategy(TestStrategy)\n\n data = customCSVminutes( dataname= './datas/FDAX_1M.csv',\n fromdate = datetime.datetime(2010, 1, 1),\n todate = datetime.datetime(2010, 2, 3),\n timeframe = bt.TimeFrame.Minutes,\n )\n\n # data = customCSVdays(dataname='./datas/ES_1D.csv',\n # fromdate=datetime.datetime(2007, 1, 1),\n # todate=datetime.datetime(2019, 1, 30),\n # timeframe=bt.TimeFrame.Days\n # )\n\n # Add the Data Feed to Cerebro\n cerebro.adddata(data)\n\n # Set our desired cash start\n cerebro.broker.setcash(100000.0)\n\n # Print out the starting conditions\n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\n\n # Run over 
everything now\n cerebro.run()\n\n # Print out the final result\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())", "sub_path": "quickstart.py", "file_name": "quickstart.py", "file_ext": "py", "file_size_in_byte": 1666, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "backtrader.Strategy", "line_number": 11, "usage_type": "attribute"}, {"api_name": "backtrader.Cerebro", "line_number": 27, "usage_type": "call"}, {"api_name": "customCSV.customCSVminutes", "line_number": 32, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "call"}, {"api_name": "backtrader.TimeFrame", "line_number": 35, "usage_type": "attribute"}]} +{"seq_id": "468097245", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('doslos', '0003_auto_20151017_1343'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='category',\n name='parent',\n field=models.ForeignKey(blank=True, to='doslos.Category', null=True),\n ),\n ]\n", "sub_path": "doslos_server/doslos/migrations/0004_auto_20151017_1346.py", "file_name": "0004_auto_20151017_1346.py", "file_ext": "py", "file_size_in_byte": 434, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "14964504", "text": "import torch\nimport torch.nn as nn\nfrom torch.nn import TransformerEncoder, TransformerEncoderLayer\nfrom pdb import set_trace as bp \n#################added break point accessor####################\n\n################### ''' large kernel = (10,3) with 32 filters with LSTM''' ######################\nclass Net (nn.Module):\n def __init__(self):\n super(Net,self).__init__()\n\t#from torch.nn import TransformerEncoder, TransformerEncoderLayer\n self.conv1 = nn.Conv2d(1,32,kernel_size=(41,3),padding=(20,1))\n #self.drop1 = nn.Dropout(0.2)\n self.conv2 = nn.Conv2d(32,32,kernel_size=(41,3),padding=(20,1))\n #self.drop2 = nn.Dropout(0.2)\n self.conv3 = nn.Conv2d(32,64,kernel_size=(21,5),padding=(10,2))\n #self.drop3 = nn.Dropout(0.2)\n self.conv4 = nn.Conv2d(64,64,kernel_size=(21,5),padding=(10,2))\n #self.drop4 = nn.Dropout(0.2)\n self.lstm1 = nn.LSTM(36*64, 1024 , batch_first=True) \n self.lstm2 = nn.LSTM(1024, 36 , batch_first=True)\n self.relu = nn.ReLU()\n\n\t#Added the Transformer on 14/02/2021 {Happy Valentines Day :p}\n\n\t#Creating the instance of TransformerEncoderLayer() class\n encoder_layer = TransformerEncoderLayer(36*64,8)\n self.transformer_encoder = TransformerEncoder(encoder_layer,4)\n self.decoder = nn.Linear(36*64,36)\n\n def forward(self, x):\n x = (self.relu(self.conv1(x)))\n x = (self.relu(self.conv2(x)))\n x = (self.relu(self.conv3(x)))\n x = (self.relu(self.conv4(x)))\n x = x.transpose(1,2)\n #bp() \n x = torch.reshape(x,(-1,800,36*64))\n x = 
self.transformer_encoder(x)\n x = self.decoder(x)\n #x,_ = self.lstm1(x)\n #x,_ = self.lstm2(x) \n return x\n\n \n \n", "sub_path": "ENV_estimation/Matlab_env_training/steps_torch_env_BEGAN/NET/Net_CNN_2LSTM_padding_64filters_encoder_transformer.py", "file_name": "Net_CNN_2LSTM_padding_64filters_encoder_transformer.py", "file_ext": "py", "file_size_in_byte": 1769, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.nn.Module", "line_number": 8, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 8, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 16, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.LSTM", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.TransformerEncoderLayer", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn.TransformerEncoder", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.reshape", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "62889938", "text": "import os\nimport re\n\nfrom tqdm import tqdm\n\n\ndef create_experiment_dir(outdir):\n assert os.path.isdir(outdir)\n max_id = -1\n for name in os.listdir(outdir):\n match = re.match(r'^(\\d+).*$', name)\n if match:\n max_id = max(max_id, int(match.group(1)))\n id_ = max_id + 1\n exp_dir = os.path.join(outdir, str(id_))\n os.mkdir(exp_dir)\n return exp_dir\n\n\ndef get_tqdm(values, desc, enabled=True):\n if enabled:\n return tqdm(values, desc=desc)\n else:\n return values\n\n\n################################################\n# Geometry\n\ndef rect_area(r):\n \"\"\"Return the area of a rectangle.\n\n Args:\n r: an object with attributes left, top, width, height\n Returns:\n float\n \"\"\"\n return float(r.width) * float(r.height)\n\n\ndef rect_overlap(r1, r2):\n \"\"\"Return the area of the intersection of two rectangles.\n\n Args:\n r1: an object with attributes left, top, width, height\n r2: an object with attributes left, top, width, height\n Returns:\n float\n \"\"\"\n left = float(max(r1.left, r2.left))\n right = float(min(r1.left + r1.width, r2.left + r2.width))\n top = float(max(r1.top, r2.top))\n bottom = float(min(r1.top + r1.height, r2.top + r2.height))\n if left >= right or top >= bottom:\n return 0.\n return (right - left) * (bottom - top)\n\n\n################################################\n# Statistics\n\nclass Stats(object):\n \n def __init__(self):\n self.n = 0\n self.loss = 0.\n self.accuracy = 0.\n self.area_f1 = 0.\n self.oracle = 0.\n self.str_acc = 0.\n self.grad_norm = 0.\n\n 
def add(self, stats):\n \"\"\"Add another Stats to this one.\"\"\"\n self.n += stats.n\n self.loss += stats.loss\n self.accuracy += stats.accuracy\n self.area_f1 += stats.area_f1\n self.oracle += stats.oracle\n self.str_acc += stats.str_acc\n self.grad_norm = max(self.grad_norm, stats.grad_norm)\n\n def __repr__(self):\n n = max(1, self.n) * 1.\n return '(n={}, loss={}, accuracy={}, area_f1={}, oracle={}, str_acc={}, grad_norm={})'.format(\n self.n, self.loss / n, self.accuracy / n, self.area_f1 / n,\n self.oracle / n, self.str_acc / n, self.grad_norm)\n __str__ = __repr__\n\n def log(self, tb_logger, step, prefix='', ignore_grad_norm=False):\n \"\"\"Log to TensorBoard.\"\"\"\n n = float(self.n)\n tb_logger.log_value(prefix + 'loss', self.loss / n, step)\n tb_logger.log_value(prefix + 'accuracy', self.accuracy / n, step)\n tb_logger.log_value(prefix + 'area_f1', self.area_f1 / n, step)\n tb_logger.log_value(prefix + 'oracle', self.oracle / n, step)\n tb_logger.log_value(prefix + 'str_acc', self.str_acc / n, step)\n if not ignore_grad_norm:\n tb_logger.log_value(prefix + 'grad_norm', self.grad_norm, step)\n", "sub_path": "phrasenode/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.isdir", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 10, "usage_type": "call"}, {"api_name": "re.match", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 16, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "211935213", "text": "from flask import render_template, flash, url_for, redirect, request, abort\nfrom jinja2 import FileSystemLoader\n\nfrom application import app\n\nimport os\n\ndef reverse_url(site_nickname):\n \"\"\"Construct the function that returns reverse URLs.\"\"\"\n return lambda url_name: app.config[\"REVERSE_URLS\"][site_nickname][url_name]\n\ndef get_site_nickname():\n \"\"\"Get the site's nickname from the domain.\"\"\"\n # Get the domain name and site nickname from the config.\n domain = request.host.split(\":\")[0]\n site_nickname = app.config[\"SITE_CONFIGURATION\"].get(domain, None)\n return site_nickname\n\ndef page_handler(path):\n # Normalise the path.\n if not path.endswith(\"/\"):\n return redirect(path + \"/\")\n elif not path.startswith(\"/\"):\n path = \"/\" + path\n\n site_nickname = get_site_nickname()\n if site_nickname is None:\n abort(404)\n\n try:\n template_name = app.config[\"URLS\"][site_nickname][path][0]\n except KeyError:\n abort(404)\n\n # Change the template loading directory.\n template_directory = os.path.join(app.root_path, \"templates/\", site_nickname)\n app.jinja_loader = FileSystemLoader(template_directory)\n media_url = \"/media/%s/\" % site_nickname\n return render_template(template_name, **{\"MEDIA_URL\": media_url, \"url\": reverse_url(site_nickname), \"debug\": app.config.get(\"DEBUG\", False)})\n\ndef index_handler():\n # Flask can't handle the root URL with a path rule.\n return page_handler(\"/\")\n\ndef warmup():\n return ''\n\n\n## Error handlers\n# Handle 404 errors\n@app.errorhandler(404)\ndef page_not_found(e):\n site_nickname = get_site_nickname()\n if 
site_nickname is None:\n site_nickname = \"\"\n template_directory = os.path.join(app.root_path, \"templates/\", site_nickname)\n app.jinja_loader = FileSystemLoader(template_directory)\n media_url = \"/media/%s/\" % site_nickname\n return render_template('404.html', **{\"MEDIA_URL\": media_url, \"url\": reverse_url(site_nickname), \"debug\": app.config.get(\"DEBUG\", False)}), 404\n\n# Handle 500 errors\n@app.errorhandler(500)\ndef server_error(e):\n return render_template('500.html'), 500\n\n", "sub_path": "application/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2128, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "application.app.config", "line_number": 10, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 10, "usage_type": "name"}, {"api_name": "flask.request.host.split", "line_number": 15, "usage_type": "call"}, {"api_name": "flask.request.host", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "application.app.config", "line_number": 16, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 28, "usage_type": "call"}, {"api_name": "application.app.config", "line_number": 31, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 31, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "application.app.root_path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 36, "usage_type": "name"}, {"api_name": "application.app.jinja_loader", "line_number": 37, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 37, "usage_type": "name"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 39, "usage_type": "call"}, {"api_name": "application.app.config.get", "line_number": 39, "usage_type": "call"}, {"api_name": "application.app.config", "line_number": 39, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 39, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 56, "usage_type": "call"}, {"api_name": "os.path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "application.app.root_path", "line_number": 56, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 56, "usage_type": "name"}, {"api_name": "application.app.jinja_loader", "line_number": 57, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 57, "usage_type": "name"}, {"api_name": "jinja2.FileSystemLoader", "line_number": 57, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 59, "usage_type": "call"}, {"api_name": "application.app.config.get", "line_number": 59, "usage_type": "call"}, {"api_name": "application.app.config", "line_number": 59, "usage_type": "attribute"}, {"api_name": "application.app", "line_number": 59, "usage_type": "name"}, {"api_name": "application.app.errorhandler", "line_number": 51, "usage_type": "call"}, {"api_name": "application.app", "line_number": 51, 
"usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 64, "usage_type": "call"}, {"api_name": "application.app.errorhandler", "line_number": 62, "usage_type": "call"}, {"api_name": "application.app", "line_number": 62, "usage_type": "name"}]} +{"seq_id": "281453677", "text": "from datetime import datetime\nimport xlrd\nfrom xlrd.biffh import XLRDError\n\nfrom messytables.core import RowSet, TableSet, Cell\nfrom messytables.types import (StringType, IntegerType,\n DateType, FloatType)\nfrom messytables.error import ReadError\n\n\nXLS_TYPES = {\n 1: StringType(),\n # NB: Excel does not distinguish floats from integers so we use floats\n # We could try actual type detection between floats and ints later\n # or use the excel format string info - see\n # https://groups.google.com/forum/?fromgroups=#!topic/\n # python-excel/cAQ1ndsCVxk\n 2: FloatType(),\n 3: DateType(None),\n # this is actually boolean but we do not have a boolean type yet\n 4: IntegerType()\n}\n\n\nclass XLSTableSet(TableSet):\n \"\"\"An excel workbook wrapper object.\n \"\"\"\n\n def __init__(self, fileobj=None, filename=None,\n window=None, encoding=None):\n '''Initilize the tableset.\n\n :param encoding: passed on to xlrd.open_workbook function\n as encoding_override\n '''\n self.window = window\n try:\n if filename:\n self.workbook = xlrd.open_workbook(filename,\n encoding_override=encoding)\n elif fileobj:\n self.workbook = xlrd.open_workbook(\n file_contents=fileobj.read(),\n encoding_override=encoding)\n else:\n raise Exception('You must provide one of filename or fileobj')\n except XLRDError:\n raise ReadError(\"Unsupported Excel format, or corrupt file\")\n\n @property\n def tables(self):\n \"\"\" Return the sheets in the workbook. \"\"\"\n return [XLSRowSet(name, self.workbook.sheet_by_name(name), self.window)\n for name in self.workbook.sheet_names()]\n\n\nclass XLSRowSet(RowSet):\n \"\"\" Excel support for a single sheet in the excel workbook. Unlike\n the CSV row set this is not a streaming operation. \"\"\"\n\n def __init__(self, name, sheet, window=None):\n self.name = name\n self.sheet = sheet\n self.window = window or 1000\n super(XLSRowSet, self).__init__(typed=True)\n\n def raw(self, sample=False):\n \"\"\" Iterate over all rows in this sheet. Types are automatically\n converted according to the excel data types specified, including\n conversion of excel dates, which are notoriously buggy. 
\"\"\"\n num_rows = self.sheet.nrows\n for i in xrange(min(self.window, num_rows) if sample else num_rows):\n row = []\n for j, cell in enumerate(self.sheet.row(i)):\n value = cell.value\n type = XLS_TYPES.get(cell.ctype, StringType())\n if type == DateType(None):\n if value == 0:\n raise ValueError('Invalid date at \"%s\":%d,%d' % (\n self.sheet.name, j + 1, i + 1))\n year, month, day, hour, minute, second = \\\n xlrd.xldate_as_tuple(value, self.sheet.book.datemode)\n value = datetime(year, month, day, hour,\n minute, second)\n row.append(Cell(value, type=type))\n yield row\n", "sub_path": "messytables/excel.py", "file_name": "excel.py", "file_ext": "py", "file_size_in_byte": 3289, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "messytables.types.StringType", "line_number": 12, "usage_type": "call"}, {"api_name": "messytables.types.FloatType", "line_number": 18, "usage_type": "call"}, {"api_name": "messytables.types.DateType", "line_number": 19, "usage_type": "call"}, {"api_name": "messytables.types.IntegerType", "line_number": 21, "usage_type": "call"}, {"api_name": "messytables.core.TableSet", "line_number": 25, "usage_type": "name"}, {"api_name": "xlrd.open_workbook", "line_number": 39, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 42, "usage_type": "call"}, {"api_name": "xlrd.biffh.XLRDError", "line_number": 47, "usage_type": "name"}, {"api_name": "messytables.error.ReadError", "line_number": 48, "usage_type": "call"}, {"api_name": "messytables.core.RowSet", "line_number": 57, "usage_type": "name"}, {"api_name": "messytables.types.StringType", "line_number": 76, "usage_type": "call"}, {"api_name": "messytables.types.DateType", "line_number": 77, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_tuple", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "call"}, {"api_name": "messytables.core.Cell", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "131434765", "text": "import graphene\nfrom graphene_django import DjangoObjectType\nfrom graphql_jwt.decorators import login_required\nfrom healthid.apps.notifications.models import Notification, NotificationMeta\nfrom graphql import GraphQLError\nfrom healthid.utils.messages.notifications_responses import\\\n NOTIFICATION_ERROR_RESPONSES\n\n\nclass NotificationMetaType(DjangoObjectType):\n class Meta:\n model = NotificationMeta\n\n\nclass NotificationType(DjangoObjectType):\n class Meta:\n model = Notification\n\n notification_meta = graphene.List(NotificationMetaType)\n\n def resolve_notification_meta(self, info, **kwargs):\n \"\"\"\n get meta data of a notification\n Returns:\n list: meta data of a single notification\n \"\"\"\n return self.get_notification_meta\n\n\nclass Query(graphene.ObjectType):\n \"\"\"\n Queries notification messages where the user is a recipient\n\n returns:\n notifications: list of 'Notification' objects\n \"\"\"\n notifications = graphene.List(NotificationType, status=graphene.String())\n\n @login_required\n def resolve_notifications(self, info, **kwargs):\n user = info.context.user\n status = kwargs.get('status')\n notifications = Notification.objects.filter(\n user=user, status__iexact=status) \\\n if status else Notification.objects.filter(user=user)\n if notifications:\n return notifications\n raise GraphQLError(NOTIFICATION_ERROR_RESPONSES[\"empty_notifications\"])\n", "sub_path": "healthid/apps/notifications/schema/notification_queries.py", 
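A minimal client-side sketch for the `notifications` query defined in notification_queries.py above; the endpoint path, auth header format, and selected fields are assumptions rather than details taken from the record:

import requests  # hypothetical consumer of the GraphQL schema above

NOTIFICATIONS_QUERY = """
query ($status: String) {
  notifications(status: $status) {
    id
    status
  }
}
"""

def fetch_notifications(endpoint, token, status="unread"):
    # login_required comes from graphql_jwt, whose common default header is
    # "Authorization: JWT <token>"; verify this against your Django settings.
    resp = requests.post(
        endpoint,
        json={"query": NOTIFICATIONS_QUERY, "variables": {"status": status}},
        headers={"Authorization": "JWT {}".format(token)},
    )
    resp.raise_for_status()
    return resp.json()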
"file_name": "notification_queries.py", "file_ext": "py", "file_size_in_byte": 1510, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "graphene_django.DjangoObjectType", "line_number": 10, "usage_type": "name"}, {"api_name": "healthid.apps.notifications.models.NotificationMeta", "line_number": 12, "usage_type": "name"}, {"api_name": "graphene_django.DjangoObjectType", "line_number": 15, "usage_type": "name"}, {"api_name": "healthid.apps.notifications.models.Notification", "line_number": 17, "usage_type": "name"}, {"api_name": "graphene.List", "line_number": 19, "usage_type": "call"}, {"api_name": "graphene.ObjectType", "line_number": 30, "usage_type": "attribute"}, {"api_name": "graphene.List", "line_number": 37, "usage_type": "call"}, {"api_name": "graphene.String", "line_number": 37, "usage_type": "call"}, {"api_name": "healthid.apps.notifications.models.Notification.objects.filter", "line_number": 43, "usage_type": "call"}, {"api_name": "healthid.apps.notifications.models.Notification.objects", "line_number": 43, "usage_type": "attribute"}, {"api_name": "healthid.apps.notifications.models.Notification", "line_number": 43, "usage_type": "name"}, {"api_name": "healthid.apps.notifications.models.Notification.objects.filter", "line_number": 45, "usage_type": "call"}, {"api_name": "healthid.apps.notifications.models.Notification.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "healthid.apps.notifications.models.Notification", "line_number": 45, "usage_type": "name"}, {"api_name": "graphql.GraphQLError", "line_number": 48, "usage_type": "call"}, {"api_name": "healthid.utils.messages.notifications_responses.NOTIFICATION_ERROR_RESPONSES", "line_number": 48, "usage_type": "name"}, {"api_name": "graphql_jwt.decorators.login_required", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "375641925", "text": "# -*- coding: utf-8 -*-\nimport pdb\nimport re\n\nimport scrapy\n\nfrom WangyiMusicSpider.items import WangyimusicspiderItem\n\nheader = {\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate',\n 'Accept-Language': 'zh-CN,zh;q=0.9',\n 'Connection': 'keep-alive',\n 'Host': 'music.163.com',\n 'Referer': 'http://music.163.com/',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.63 Safari/537.36 Qiyu/2.1.1.1'\n}\n\n\nclass WangyiSpider(scrapy.Spider):\n name = 'wangyi'\n allowed_domains = ['music.163.com']\n start_urls = ['http://music.163.com/discover/playlist/']\n\n song_name = \"\"\n gd_name = \"\"\n player_num = \"\"\n album_name = \"\"\n song_ID = \"\"\n count = 0\n\n def parse(self, response):\n # 分类列表,不同风格的歌单\n classification_list = re.findall(r'下一页', response.text)):\n # next_page_url = 'http://music.163.com' + re.findall(r'下一页', response.text)[0]\n # yield scrapy.Request(next_page_url, callback=self.get_gd)\n # print(self.count)\n\n def song_list(self, response):\n music_list = response.xpath('//div[@id=\"song-list-pre-cache\"]//ul/li')\n for song in music_list:\n self.song_ID = song.xpath('./a/@href').extract()[0]\n self.song_name = song.xpath('./a/text()').extract()[0]\n url_single_list = 'http://music.163.com{}'.format(self.song_ID)\n self.song_ID = re.findall('\\d+', self.song_ID)[0]\n yield scrapy.Request(url_single_list, headers=header, callback=self.single_list)\n\n def single_list(self, response):\n 
sing_name = response.xpath('//p[@class=\"des s-fc4\"]/span/@title').extract()[0]\n album_name = response.xpath('//p[@class=\"des s-fc4\"]/a/text()').extract()[0]\n song_info = WangyimusicspiderItem()\n\n song_info['song_info_songid'] = self.song_ID\n song_info['song_info_songname'] = self.song_name\n song_info['song_info_singername'] = sing_name\n # song_info['竞品名'] = \"网易云音乐\"\n song_info['song_info_gdname'] = self.gd_name\n song_info['song_info_albumname'] = album_name\n song_info['song_info_playnum'] = self.player_num\n song_info['song_info_gdid'] = re.findall(r'\\d+', self.gd_id)[0]\n\n yield song_info\n", "sub_path": "day10/WangyiMusicSpider/WangyiMusicSpider/spiders/wangyi.py", "file_name": "wangyi.py", "file_ext": "py", "file_size_in_byte": 3437, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.Spider", "line_number": 21, "usage_type": "attribute"}, {"api_name": "re.findall", "line_number": 35, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 38, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 48, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 61, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 62, "usage_type": "call"}, {"api_name": "WangyiMusicSpider.items.WangyimusicspiderItem", "line_number": 67, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 76, "usage_type": "call"}]} +{"seq_id": "287938911", "text": "# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/sqlflow/__main__.py\n# Compiled at: 2020-04-10 03:26:06\n# Size of source mod 2**32: 583 bytes\nimport argparse\nfrom sqlflow.client import Client\nparser = argparse.ArgumentParser()\nparser.add_argument('sql', nargs='+', type=str, help='sql', action='store')\nparser.add_argument('--url', type=str, help='server url', action='store', default=None)\nparser.add_argument('--ca_crt', type=str, help='Path to CA certificates of SQLFlow client.', action='store', default=None)\n\ndef main():\n args = parser.parse_args()\n client = Client(server_url=(args.url), ca_crt=(args.ca_crt))\n for sql in args.sql:\n print('executing: {}'.format(sql))\n print(client.execute(sql))", "sub_path": "pycfiles/sqlflow-0.10.0-py3.6/__main__.cpython-36.py", "file_name": "__main__.cpython-36.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlflow.client.Client", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "164888997", "text": "\"\"\"empty message\r\n\r\nRevision ID: b33fc1bcc892\r\nRevises: a23a8673ab13\r\nCreate Date: 2020-04-28 16:38:11.575091\r\n\r\n\"\"\"\r\nfrom alembic import op\r\nimport sqlalchemy as sa\r\n\r\n\r\n# revision identifiers, used by Alembic.\r\nrevision = 'b33fc1bcc892'\r\ndown_revision = 'a23a8673ab13'\r\nbranch_labels = None\r\ndepends_on = None\r\n\r\n\r\ndef upgrade():\r\n # ### commands auto generated by Alembic - please adjust! 
###\r\n    op.create_table('hall',\r\n    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),\r\n    sa.Column('h_address_id', sa.Integer(), nullable=True),\r\n    sa.Column('h_num', sa.Integer(), nullable=True),\r\n    sa.Column('h_seate', sa.String(length=256), nullable=True),\r\n    sa.ForeignKeyConstraint(['h_address_id'], ['cinema_address.id'], ),\r\n    sa.PrimaryKeyConstraint('id'),\r\n    sa.UniqueConstraint('h_num')\r\n    )\r\n    # ### end Alembic commands ###\r\n\r\n\r\ndef downgrade():\r\n    # ### commands auto generated by Alembic - please adjust! ###\r\n    op.drop_table('hall')\r\n    # ### end Alembic commands ###\r\n", "sub_path": "migrations/versions/b33fc1bcc892_.py", "file_name": "b33fc1bcc892_.py", "file_ext": "py", "file_size_in_byte": 1019, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 25, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 26, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 27, "usage_type": "call"}, {"api_name": "sqlalchemy.UniqueConstraint", "line_number": 28, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 35, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 35, "usage_type": "name"}]} +{"seq_id": "138205365", "text": "\"\"\" setup module for packaging \"\"\"\n\nimport setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n    LONG_DESCRIPTION = fh.read()\n\nsetuptools.setup(\n    name=\"crowdstrike\",\n    version=\"0.0.3\",\n    author=\"James Hodgkinson\",\n    author_email=\"yaleman@ricetek.net\",\n    description=\"Crowdstrike interface to the Python API\",\n    long_description=LONG_DESCRIPTION,\n    long_description_content_type=\"text/markdown\",\n    url=\"https://github.com/yaleman/crowdstrike_api\",\n    packages=setuptools.find_packages(),\n    install_requires=[\n        'requests-oauthlib',\n        'loguru',\n    ],\n    classifiers=[\n        \"Development Status :: 4 - Beta\",\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.7\",\n        \"License :: OSI Approved :: MIT License\",\n        \"Operating System :: OS Independent\",\n    ],\n    python_requires='>=3.7',\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "setuptools.setup", "line_number": 8, "usage_type": "call"}, {"api_name": "setuptools.find_packages", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "458249269", "text": "'''Normal force plotter\r\n\r\nCalculates the size of the normal force for a point-like object with the mass\r\ngiven as an argument and creates two plots of the data.'''\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as mplot\r\n\r\ndef 
func(x):\r\n    '''Function which describes our one-dimensional surface.'''\r\n    return (x**2)*np.sin(x) + 1/x + 200\r\n\r\ndef fprime(x):\r\n    '''Function which describes our function f differentiated.'''\r\n    return (x**2)*np.cos(x) + 2*x*np.sin(x) - 1/(x**2)\r\n\r\ndef plotNormalForce(mass):\r\n    '''Function which creates two plots; one of distance against surface\r\nheight and another of distance against normal force.'''\r\n    xstep = np.arange(1, 9, 0.01)\r\n\r\n    xdata, ydata = [], []\r\n    for step in xstep:\r\n        yval = func(step)\r\n        xdata.append(step)\r\n        ydata.append(yval)\r\n    mplot.plot(xdata, ydata)\r\n    mplot.xlabel('Distance (m)')\r\n    mplot.ylabel('Surface Height (cm)')\r\n    mplot.savefig(\"q2u1305340a.pdf\", format='pdf')\r\n    mplot.clf()\r\n\r\n    xdata, ydata = [], []\r\n    for step in xstep:\r\n        yval = np.cos(np.arctan(fprime(step))) * float(mass) * 9.80665\r\n        xdata.append(step)\r\n        ydata.append(yval)\r\n    mplot.plot(xdata, ydata)\r\n    mplot.xlabel('Distance (m)')\r\n    mplot.ylabel('Normal force (N)')\r\n    mplot.savefig(\"q2u1305340b.pdf\", format='pdf')\r\n\r\nplotNormalForce(20)\r\n", "sub_path": "Computational Physics/q2u1306340.py", "file_name": "q2u1306340.py", "file_ext": "py", "file_size_in_byte": 1344, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.sin", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "numpy.cos", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.arctan", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 39, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 39, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "123942588", "text": "# -*-coding:UTF-8-*-\nfrom __future__ import print_function, absolute_import\n\nimport utils.Mytransforms as Mytransforms\nimport numpy as np\nimport random\nimport math\nimport json\nimport glob\nimport cv2\nimport os\n\nimport torch\nimport torch.utils.data as data\n\n\nfrom utils.extra_utils.osutils import *\nfrom 
utils.extra_utils.imutils import *\nfrom utils.extra_utils.transforms import *\n\n\ndef guassian_kernel(size_w, size_h, center_x, center_y, sigma):\n gridy, gridx = np.mgrid[0:size_h, 0:size_w]\n D2 = (gridx - center_x) ** 2 + (gridy - center_y) ** 2\n return np.exp(-D2 / 2.0 / sigma / sigma)\n\n\ndef getBoundingBox(img, kpt, height, width, stride):\n \n\n # print(kpt.shape)\n\n for person in range(kpt.shape[0]): \n x = np.zeros((kpt.shape[1],1))\n y = np.zeros((kpt.shape[1],1))\n for index in range(kpt.shape[1]):\n if float(kpt[person,index,1]) >= 0 or float(kpt[person,index,0]) >= 0:\n # print(person,index, kpt.shape, kpt[person,index], type(person), type(index))\n x[index] = [kpt[person,index,1]]\n y[index] = [kpt[person,index,0]]\n\n\n x_min = int(max(min(x), 0))\n x_max = int(min(max(x), width))\n y_min = int(max(min(y), 0))\n y_max = int(min(max(y), height))\n\n center_x = (x_min + x_max)/2\n center_y = (y_min + y_max)/2\n w = x_max - x_min\n h = y_max - y_min\n\n coord = []\n coord.append([min(int(center_y/stride),height/stride-1), min(int(center_x/stride),width/stride-1)])\n coord.append([min(int(y_min/stride),height/stride-1),min(int(x_min/stride),width/stride-1)])\n coord.append([min(int(y_min/stride),height/stride-1),min(int(x_max/stride),width/stride-1)])\n coord.append([min(int(y_max/stride),height/stride-1),min(int(x_min/stride),width/stride-1)])\n coord.append([min(int(y_max/stride),height/stride-1),min(int(x_max/stride),width/stride-1)])\n\n boxes = np.zeros((kpt.shape[0],int(height/stride), int(width/stride), 5), dtype=np.float32)\n for i in range(5):\n # resize from 368 to 46\n x = int(coord[i][0]) * 1.0\n y = int(coord[i][1]) * 1.0\n heat_map = guassian_kernel(size_h=height/stride, size_w=width/stride, center_x=x, center_y=y, sigma=3)\n heat_map[heat_map > 1] = 1\n heat_map[heat_map < 0.0099] = 0\n boxes[person,:, :, i] = heat_map\n\n box = np.sum(boxes,axis=0)\n\n return box\n\n\nclass PoseTrack_Data(data.Dataset):\n def __init__(self, is_train, root_dir, sigma, stride, transformer=None):\n self.inp_res = 368\n self.out_res = 46\n self.scale_factor = 0.25\n self.rot_factor = 30\n self.label_type = 'Gaussian'\n self.stride = stride\n self.transformer = transformer\n self.sigma = sigma\n self.root_dir = root_dir\n self.is_train = is_train\n self.transform = transform\n\n self.train_list, self.val_list = [], []\n\n self.train_dir = self.root_dir + 'images/train/'\n self.val_dir = self.root_dir + 'images/val/'\n self.test_dir = self.root_dir + 'images/test/'\n\n self.anno_train = self.root_dir + 'annotations/train/'\n self.anno_val = self.root_dir + 'annotations/val/'\n self.anno_test = self.root_dir + 'annotations/test/'\n\n\n self.labelFiles = {}\n self.img_List = {}\n self.box_head = {}\n self.keypoints = {}\n\n if is_train:\n self.labelFiles = [f for f in os.listdir(self.anno_train)]\n self.labelFiles.sort()\n\n count = 0\n masterCount = 0\n for i in range(len(self.labelFiles)):\n with open(self.anno_train + self.labelFiles[i]) as anno_file:\n self.anno = json.load(anno_file)\n\n frame_id = []\n file_name = []\n for j in range(len(self.anno['images'])):\n if self.anno['images'][j]['is_labeled'] == True:\n frame_id.append(self.anno['images'][j]['frame_id'])\n file_name.append(self.anno['images'][j]['file_name'])\n\n count = 0\n tempBbox = []\n tempkpts = []\n for j in range(len(self.anno['annotations'])):\n if self.anno['annotations'][j]['image_id'] == frame_id[count]:\n tempBbox.append(self.anno['annotations'][j]['bbox_head'])\n 
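                            # Boxes/keypoints for the current frame accumulate in
                            # tempBbox/tempkpts and are flushed into self.box_head /
                            # self.keypoints once image_id changes (the elif below).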
tempkpts.append(self.anno['annotations'][j]['keypoints'])\n\n elif self.anno['annotations'][j]['image_id'] != frame_id[count]\\\n and self.anno['annotations'][j-1]['image_id'] == frame_id[count]:\n self.box_head[masterCount] = tempBbox\n self.keypoints[masterCount] = tempkpts\n self.img_List[masterCount] = file_name[count-1]\n\n count += 1\n masterCount += 1\n tempBbox = []\n tempkpts = []\n\n tempBbox.append(self.anno['annotations'][j]['bbox_head'])\n tempkpts.append(self.anno['annotations'][j]['keypoints'])\n\n else:\n print(self.anno['annotations'][j]['image_id'])\n \n self.box_head[masterCount] = tempBbox\n self.keypoints[masterCount] = tempkpts\n self.img_List[masterCount] = file_name[count]\n\n self.train_list = self.img_List\n\n print(\"Images for train: \",len(self.train_list))\n\n else:\n self.labelFiles = [f for f in os.listdir(self.anno_val)]\n self.labelFiles.sort()\n\n count = 0\n masterCount = 0\n for i in range(len(self.labelFiles)):\n with open(self.anno_val + self.labelFiles[i]) as anno_file:\n self.anno = json.load(anno_file)\n\n frame_id = []\n file_name = []\n for j in range(len(self.anno['images'])):\n if self.anno['images'][j]['is_labeled'] == True:\n frame_id.append(self.anno['images'][j]['frame_id'])\n file_name.append(self.anno['images'][j]['file_name'])\n\n count = 0\n tempBbox = []\n tempkpts = []\n for j in range(len(self.anno['annotations'])):\n if self.anno['annotations'][j]['image_id'] == frame_id[count]:\n tempBbox.append(self.anno['annotations'][j]['bbox_head'])\n tempkpts.append(self.anno['annotations'][j]['keypoints'])\n\n elif self.anno['annotations'][j]['image_id'] != frame_id[count]\\\n and self.anno['annotations'][j-1]['image_id'] == frame_id[count]:\n self.box_head[masterCount] = tempBbox\n self.keypoints[masterCount] = tempkpts\n self.img_List[masterCount] = file_name[count-1]\n\n count += 1\n masterCount += 1\n tempBbox = []\n tempkpts = []\n\n tempBbox.append(self.anno['annotations'][j]['bbox_head'])\n tempkpts.append(self.anno['annotations'][j]['keypoints'])\n\n else:\n print(self.anno['annotations'][j]['image_id'])\n \n self.box_head[masterCount] = tempBbox\n self.keypoints[masterCount] = tempkpts\n self.img_List[masterCount] = file_name[count]\n\n self.val_list = self.img_List\n\n print(\"Images for val: \",len(self.val_list))\n \n\n\n def __getitem__(self, index):\n if self.is_train:\n items = \"/home/bm3768/Desktop/Pose/dataset/PoseTrack/\" + self.train_list[index]\n else:\n items = \"/home/bm3768/Desktop/Pose/dataset/PoseTrack/\" + self.val_list[index]\n\n im = cv2.imread(items)\n if im is None:\n print(items)\n\n img = np.array(im,dtype=np.float32)\n kps = np.asarray(self.keypoints[index])\n\n center = {}\n\n center[0] = [img.shape[0]/2,img.shape[1]/2]\n\n # print(\"kps \", kps.shape)\n\n kpt = np.zeros((kps.shape[0],17,3))\n for i in range(kps.shape[0]):\n points = np.reshape(kps[i], (17,3))\n kpt[i] = points\n\n kpts = np.zeros((kpt.shape[0]*17,3))\n\n for i in range(kpt.shape[0]):\n kpts[17*i:17*(i+1),:] = kpt[i,:,:] \n\n # print(\"Image \", img.shape)\n # print(\"Kpt \", kpt.shape)\n # print(\"Kpts \", kpts.shape)\n # print(\"Center \", center)\n\n\n img, kpts, center = self.transformer(img, kpts, center)\n\n for i in range(kpt.shape[0]):\n kpt[i,:,:] = kpts[17*i:17*(i+1),:]\n\n # kpt = torch.Tensor(kpt)\n\n # print(\"Image \", img.shape)\n # print(\"Kpt \", kpt.shape)\n # print(\"Center \", center)\n\n height, width, _ = img.shape\n\n # kpt = np.zeros((17,3))\n # for i in range(kpts.shape[0]):\n # kpt = kpt + kpts[i,:,:]\n\n # 
print(kpt[:,2])\n\n # np.clip(kpt[:,2],0,1,kpt[:,2])\n\n # print(kpt[:,2])\n\n box = getBoundingBox(img, kpt, height, width, self.stride)\n\n heatmaps = np.zeros((kpt.shape[0],int(height/self.stride), int(width/self.stride), int(kpt.shape[1]+1)), dtype=np.float32)\n for i in range(kpt.shape[0]):\n for j in range(kpt.shape[1]):\n # resize from 368 to 46\n x = int(kpt[i,j,0]) * 1.0 / self.stride\n y = int(kpt[i,j,1]) * 1.0 / self.stride\n heat_map = guassian_kernel(size_h=height / self.stride, size_w=width / self.stride, center_x=x, center_y=y, sigma=self.sigma)\n heat_map[heat_map > 1] = 1\n heat_map[heat_map < 0.0099] = 0\n heatmaps[i,:, :, j + 1] = heat_map\n\n heatmaps[i,:, :, 0] = 1.0 - np.max(heatmaps[i,:, :, 1:], axis=2) # for background\n\n # print(heatmaps.shape)\n\n # heatmap = np.zeros((int(height/self.stride), int(width/self.stride), int(kpt.shape[1]+1)), dtype=np.float32)\n\n heatmap = np.sum(heatmaps,axis=0)\n # print(heatmap.shape)\n\n centermap = np.zeros((height, width, 1), dtype=np.float32)\n center_map = guassian_kernel(size_h=height, size_w=width, center_x=center[0][0], center_y=center[0][1], sigma=3)\n center_map[center_map > 1] = 1\n center_map[center_map < 0.0099] = 0\n centermap[:, :, 0] = center_map\n\n img = Mytransforms.normalize(Mytransforms.to_tensor(img), [128.0, 128.0, 128.0],\n [256.0, 256.0, 256.0])\n heatmap = Mytransforms.to_tensor(heatmap)\n centermap = Mytransforms.to_tensor(centermap)\n box = Mytransforms.to_tensor(box)\n\n return img, heatmap, centermap, items, 0, box\n\n\n def __len__(self):\n if self.is_train:\n return len(self.train_list)\n else:\n return len(self.val_list)\n\n\n # #center = torch.Tensor(items['objpos'])\n # center = items['objpos']\n # scale = items['scale_provided']\n\n # if center[0] != -1:\n # center[1] = center[1] + 15*scale\n # scale = scale*1.25\n\n\n # nParts = pts.size(0)\n\n # img = np.array(cv2.imread(img_path), dtype=np.float32)\n\n # # expand dataset\n # img, kpt, center = self.transformer(img, pts, center, scale)\n # height, width, _ = img.shape\n\n # heatmap = np.zeros((int(height/self.stride), int(width/self.stride), int(len(kpt)+1)), dtype=np.float32)\n # for i in range(len(kpt)):\n # # resize from 368 to 46\n # x = int(kpt[i][0]) * 1.0 / self.stride\n # y = int(kpt[i][1]) * 1.0 / self.stride\n # heat_map = guassian_kernel(size_h=height / self.stride, size_w=width / self.stride, center_x=x, center_y=y, sigma=self.sigma)\n # heat_map[heat_map > 1] = 1\n # heat_map[heat_map < 0.0099] = 0\n # heatmap[:, :, i + 1] = heat_map\n\n # heatmap[:, :, 0] = 1.0 - np.max(heatmap[:, :, 1:], axis=2) # for background\n\n # centermap = np.zeros((height, width, 1), dtype=np.float32)\n # center_map = guassian_kernel(size_h=height, size_w=width, center_x=center[0], center_y=center[1], sigma=3)\n # center_map[center_map > 1] = 1\n # center_map[center_map < 0.0099] = 0\n # centermap[:, :, 0] = center_map\n\n # img = Mytransforms.normalize(Mytransforms.to_tensor(img), [128.0, 128.0, 128.0],\n # [256.0, 256.0, 256.0])\n # heatmap = Mytransforms.to_tensor(heatmap)\n # centermap = Mytransforms.to_tensor(centermap)\n\n # return img, heatmap, centermap, img_path\n\n\n # def __len__(self):\n # if self.is_train:\n # return len(self.train_list)\n # else:\n # return len(self.val_list)", "sub_path": "utils/posetrack_data.py", "file_name": "posetrack_data.py", "file_ext": "py", "file_size_in_byte": 13199, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.mgrid", 
"line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.exp", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 60, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 75, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 75, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 106, "usage_type": "call"}, {"api_name": "json.load", "line_number": 113, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 156, "usage_type": "call"}, {"api_name": "json.load", "line_number": 163, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 217, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 217, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 218, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 231, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.max", "line_number": 278, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 287, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 287, "usage_type": "attribute"}, {"api_name": "utils.Mytransforms.normalize", "line_number": 293, "usage_type": "call"}, {"api_name": "utils.Mytransforms", "line_number": 293, "usage_type": "name"}, {"api_name": "utils.Mytransforms.to_tensor", "line_number": 293, "usage_type": "call"}, {"api_name": "utils.Mytransforms.to_tensor", "line_number": 295, "usage_type": "call"}, {"api_name": "utils.Mytransforms", "line_number": 295, "usage_type": "name"}, {"api_name": "utils.Mytransforms.to_tensor", "line_number": 296, "usage_type": "call"}, {"api_name": "utils.Mytransforms", "line_number": 296, "usage_type": "name"}, {"api_name": "utils.Mytransforms.to_tensor", "line_number": 297, "usage_type": "call"}, {"api_name": "utils.Mytransforms", "line_number": 297, "usage_type": "name"}]} +{"seq_id": "418684225", "text": "import json\nimport sys\nimport traceback\n\ndeanonymiser_file = sys.argv[1]\ncomplex_file = sys.argv[2]\ngold_simple_file = sys.argv[3]\nbeam_file = sys.argv[4]\n\ndef deanonymize(sentence, deanonymizer):\n deanonymizer = json.loads(deanonymizer)\n deanonymized_sentence = []\n for token in sentence.split(\" \"):\n deanonymized_sentence.append(deanonymizer.get(token, token).decode(\"utf-8\"))\n return \" \".join(deanonymized_sentence)\n\nwith open(deanonymiser_file) as f:\n deanonymizer = f.readlines()\ndeanonymizer = [l.strip() for l in deanonymizer]\n\nwith open(complex_file) as f:\n gold_complex_data = f.readlines()\ngold_complex_data = [l.strip() for l in gold_complex_data]\n\nwith open(gold_simple_file) as f:\n gold_simple_data = f.readlines()\ngold_simple_data = [l.strip() for l in gold_simple_data]\n\n# Candidate file w/ beam output\nwith open(beam_file) as f:\n predicted_simple_data = 
f.readlines()\npredicted_simple_data = [l.strip() for l in predicted_simple_data]\n\nbeam_size = 400\n\nwith open(\"data/new_data/test/dictionary_seq2seq_att.txt\", \"w\") as f:\n for idx in range(len(gold_complex_data)):\n gold_complex = deanonymize(gold_complex_data[idx], deanonymizer[idx]).encode(\"utf-8\")\n gold_simple = deanonymize(gold_simple_data[idx], deanonymizer[idx]).encode(\"utf-8\")\n predicted_simple = predicted_simple_data[idx*400 : (idx+1)*400]\n predicted_simple = [deanonymize(l, deanonymizer[idx]).encode(\"utf-8\") for l in predicted_simple]\n f.write(gold_complex)\n f.write(\"\\n\")\n f.write(gold_simple)\n f.write(\"\\n\")\n f.write(str(len(predicted_simple)))\n f.write(\"\\n\")\n f.write(\"\\n\".join(predicted_simple))\n f.write(\"\\n\")\n", "sub_path": "deanonymize.py", "file_name": "deanonymize.py", "file_ext": "py", "file_size_in_byte": 1719, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.argv", "line_number": 5, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "92568170", "text": "# Standard library\nimport os\nimport time\nimport xml.dom.minidom as minidom\nimport sys\n\n# Third-party depedencies\nfrom bottle import run, route, response, request, post, get\n\n# Local modules\nfrom src.estimator import estimator, sample_input_data\n\n\ndef create_xml_from_json(json_data: dict):\n '''Generate xml from json. Doesn\\'t work properly yet'''\n document = minidom.Document()\n rootElement = document.appendChild(\n document.createElement('covid-19-estimate'))\n for key, value in json_data.items():\n element = document.createElement(str(key))\n if isinstance(value, dict):\n for key, value in value.items():\n childElement = document.createElement(str(key))\n childElement.appendChild(document.createTextNode(str(value)))\n element.appendChild(childElement)\n\n else:\n element.appendChild(document.createTextNode(str(value)))\n\n rootElement.appendChild(element)\n return document\n\n\ndef allow_cors(func):\n def wrapper(*args, **kwargs):\n response.set_header('Access-Control-Allow-Origin', '*')\n response.set_header('Access-Control-Allow-Methods',\n 'GET, POST, PUT, OPTIONS')\n response.set_header('Access-Control-Allow-Headers',\n 'Origin, Accept, Content-Type,X-Requested-With')\n if request.method != 'OPTIONS':\n return func(*args, **kwargs)\n return wrapper\n\n\n@route('/api/v1/on-covid-19', method=['OPTIONS', 'POST'])\n@route('/api/v1/on-covid-19/', method=['OPTIONS', 'POST'])\n@allow_cors\ndef json_response():\n reques_time = time.monotonic()\n\n try:\n data = estimator(request.json)\n except:\n data = {}\n\n with open('./access.log', 'a') as log:\n log.write(\n f'{request.method} \\t\\t {request.path} \\t\\t {response.status_code} \\t\\t {round(time.monotonic()-reques_time, 3)} S \\n')\n\n return data\n\n\n@route('/api/v1/on-covid-19/xml', method=['OPTIONS', 'POST'])\n@route('/api/v1/on-covid-19/xml/', method=['OPTIONS', 'POST'])\n@allow_cors\ndef xml_response():\n reques_time = time.monotonic()\n\n response.set_header('content-type', 'text/xml')\n\n try:\n data = estimator(request.json)\n except:\n data = {}\n\n xml_data = create_xml_from_json(data).toprettyxml()\n\n with open('./access.log', 'a') as log:\n log.write(\n 
f'{request.method} \\t\\t {request.path} \\t\\t {response.status_code} \\t\\t {round(time.monotonic()-reques_time, 3)} S \\n')\n\n return xml_data\n\n\n@route('/api/v1/on-covid-19/logs/', method=['OPTIONS', 'GET'])\n@route('/api/v1/on-covid-19/logs', method=['OPTIONS', 'GET'])\ndef logs():\n reques_time = time.monotonic()\n\n response.set_header('content-type', 'text/plain')\n\n with open('./access.log', 'a') as log:\n log.write(\n f'{request.method} \\t\\t {request.path} \\t\\t {response.status_code} \\t\\t {round(time.monotonic()-reques_time, 3)} S \\n')\n\n with open('./access.log', 'r') as log:\n return log.read()\n\n\nrun(host='localhost', port=8080, debug=True, reloader=True)\n", "sub_path": "application.py", "file_name": "application.py", "file_ext": "py", "file_size_in_byte": 3096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "xml.dom.minidom.Document", "line_number": 16, "usage_type": "call"}, {"api_name": "xml.dom.minidom", "line_number": 16, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 36, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 36, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 37, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 37, "usage_type": "name"}, {"api_name": "bottle.response.set_header", "line_number": 39, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 39, "usage_type": "name"}, {"api_name": "bottle.request.method", "line_number": 41, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 41, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 50, "usage_type": "call"}, {"api_name": "src.estimator.estimator", "line_number": 53, "usage_type": "call"}, {"api_name": "bottle.request.json", "line_number": 53, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 53, "usage_type": "name"}, {"api_name": "bottle.request.method", "line_number": 59, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 59, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "bottle.response.status_code", "line_number": 59, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 59, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 59, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 46, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 47, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 68, "usage_type": "call"}, {"api_name": "bottle.response.set_header", "line_number": 70, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 70, "usage_type": "name"}, {"api_name": "src.estimator.estimator", "line_number": 73, "usage_type": "call"}, {"api_name": "bottle.request.json", "line_number": 73, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 73, "usage_type": "name"}, {"api_name": "bottle.request.method", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 81, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bottle.response.status_code", "line_number": 81, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 81, "usage_type": "name"}, {"api_name": "time.monotonic", 
"line_number": 81, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 64, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 65, "usage_type": "call"}, {"api_name": "time.monotonic", "line_number": 89, "usage_type": "call"}, {"api_name": "bottle.response.set_header", "line_number": 91, "usage_type": "call"}, {"api_name": "bottle.response", "line_number": 91, "usage_type": "name"}, {"api_name": "bottle.request.method", "line_number": 95, "usage_type": "attribute"}, {"api_name": "bottle.request", "line_number": 95, "usage_type": "name"}, {"api_name": "bottle.request.path", "line_number": 95, "usage_type": "attribute"}, {"api_name": "bottle.response.status_code", "line_number": 95, "usage_type": "attribute"}, {"api_name": "bottle.response", "line_number": 95, "usage_type": "name"}, {"api_name": "time.monotonic", "line_number": 95, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 86, "usage_type": "call"}, {"api_name": "bottle.route", "line_number": 87, "usage_type": "call"}, {"api_name": "bottle.run", "line_number": 101, "usage_type": "call"}]} +{"seq_id": "156917074", "text": "\"\"\"\nscripts_canal.py\n==========\nFunctions and results of TP2 : Flow in channel\n\n27/03/2018\nMiguel Calpe Linares\n\n\"\"\"\nfrom __future__ import print_function\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom tp_base import TPBase\n\nclass TP1Canal(TPBase):\n def __init__(self, name=None, HAS_TO_SAVE=False):\n super(TP1Canal, self).__init__()\n self.tag = '_tp1_'\n self.width_canal = 0.04 # width canal in m\n self.name = name\n self.HAS_TO_SAVE = HAS_TO_SAVE\n \n def compute_water_charge(self, volume, time, units_volume='ml'):\n \"\"\"Computes water charge (m^3/s) from volume in ml \n and time in seconds.\n \"\"\"\n if units_volume == 'ml':\n volume_iu = volume / 1e6\n else:\n raise NotImplementedError\n \n return volume_iu / time\n\n def compute_energy(self, charge, h):\n \"\"\" . \"\"\"\n rho = self.rho\n g = self.gravity\n d = self.width_canal\n ek = 0.5 * rho * ((charge**2) / (h*d)**2)\n ep = rho * g * h\n return ek + ep\n\n def compute_froude(self, charge, h):\n \"\"\" . \"\"\"\n g = self.gravity\n d = self.width_canal\n denom = h *d * np.sqrt(h * g)\n return charge / denom\n\n\nif __name__ == '__main__':\n\n # Create object TP1Canal\n tp = TP1Canal(name='calpe', HAS_TO_SAVE=True)\n\n ## QUESTION 1.1 : Mesure water charge & incertitude\n volumes = [550, 600, 650] # in ml\n times = [2.8, 2.93, 3.] 
# in seconds\n\n charges = []\n\n for index, volume in enumerate(volumes):\n q = tp.compute_water_charge(\n volume, times[index], units_volume='ml')\n charges.append(q)\n\n charge = np.mean(charges)\n print('The water charge question 1.1 measured is {} m^3/s'.format(charge))\n\n # Compute incertitude of the first value of Q\n # dv = 10 ml ; dt = 0.5s\n incert_q = 10 / volumes[0] + 0.5 / times[0]\n \n to_write = (\n 'Question 1.1 \\n' +\n 'Q = {} m3/s \\n'.format(np.mean(charges)) +\n 'incert_q = {} \\n'.format(incert_q))\n\n ## QUESTION 2.1: Mesure h(x)\n hs = [0.025, 0.023, 0.02, 0.018, 0.015] # in m\n\n ## QUESTION 2.2: Mesure E(x)\n energies = []\n for h in hs:\n energies.append(tp.compute_energy(charge, h))\n\n froudes = []\n for h in hs:\n froudes.append(tp.compute_froude(charge, h))\n\n fig, ax = plt.subplots()\n ax.plot(energies, hs)\n\n to_write += (\n 'Question 2.1 and 2.2 \\n' +\n 'h = {} m \\n'.format(hs) +\n 'energies = {} \\n'.format(energies) +\n 'froudes = {} \\n'.format(froudes))\n\n\n ## QUESTION 3.5: Ressaut hydraulique\n # Compute water charge modified\n volumes = [480, 580, 430] # volumes in ml\n times = [3, 3.8, 2.8] # times in seconds\n\n charges = []\n\n for index, volume in enumerate(volumes):\n q = tp.compute_water_charge(\n volume, times[index], units_volume='ml')\n charges.append(q)\n\n charge = np.mean(charges)\n print('The water charge question 3.5 measured is {} m^3/s'.format(charge))\n \n # Compute Froude\n h_amont = 0.032 # in m\n h_aval = 0.008 # in m\n froude_amont = tp.compute_froude(charge, h_amont)\n froude_aval = tp.compute_froude(charge, h_aval)\n \n to_write += (\n 'Question 3.5 \\n' +\n 'Q = {} m3/s \\n'.format(charge) +\n 'froude_amont = {}'.format(froude_amont) +\n 'froude_aval = {}'.format(froude_aval))\n\n ## Question 3.8;\n hs = [0.032, 0.008, 0.017]\n energies = []\n froudes = []\n for h in hs:\n energies.append(tp.compute_energy(charge, h))\n froudes.append(tp.compute_froude(charge, h))\n \n if tp.HAS_TO_SAVE:\n tp.write_results_to_txt(to_write)\n\n plt.show()\n \n \n \n\n \n", "sub_path": ".ipynb_checkpoints/tp1_canal-checkpoint.py", "file_name": "tp1_canal-checkpoint.py", "file_ext": "py", "file_size_in_byte": 3759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tp_base.TPBase", "line_number": 17, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}]} +{"seq_id": "593787222", "text": "import re, collections\nfrom nltk.corpus import words\n\n\ndef train(features):\n model = collections.defaultdict(lambda: 1)\n for f in features:\n f = f.lower()\n model[f] += 1\n return model\n\n\ntechnical_list = [\"software\"]\ndictionary_words = words.words() + technical_list\nWORD_LIST = train(dictionary_words)\n\nalphabet = list(map(chr, range(97, 123)))\n\ndef edits1(word):\n splits = [(word[:i], word[i:]) for i in range(len(word) + 1)]\n deletes = [a + b[1:] for a, b in splits if b]\n transposes = [a + b[1] + b[0] + b[2:] 
for a, b in splits if len(b)>1]\n replaces = [a + c + b[1:] for a, b in splits for c in alphabet if b]\n inserts = [a + c + b for a, b in splits for c in alphabet]\n return set(deletes + transposes + replaces + inserts)\n\ndef known_edits2(word):\n return set(e2 for e1 in edits1(word) for e2 in edits1(e1) if e2 in WORD_LIST)\n\ndef known(words): \n return set(w for w in words if w in WORD_LIST)\n\ndef spellCorrect(word):\n candidates = known([word]) or known(edits1(word)) or known_edits2(word) or [word]\n #print candidates\n #return candidates\n return max(candidates, key=WORD_LIST.get)\n\n'''while True:\n wrong_spelling = raw_input(\"Enter a wrong spelling: \")\n print \"%s --> %s\\n\" %(wrong_spelling, correct(wrong_spelling))'''\n", "sub_path": "mysite/assessment/SpellChecker.py", "file_name": "SpellChecker.py", "file_ext": "py", "file_size_in_byte": 1312, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "collections.defaultdict", "line_number": 6, "usage_type": "call"}, {"api_name": "nltk.corpus.words.words", "line_number": 14, "usage_type": "call"}, {"api_name": "nltk.corpus.words", "line_number": 14, "usage_type": "name"}, {"api_name": "nltk.corpus.words", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "440129132", "text": "\"\"\" JSON log module \"\"\"\nfrom datetime import datetime\nfrom os import path, remove, scandir\nfrom typing import List\nfrom databus.client.log import Log\nfrom databus.database.json_db.json_client import JsonClient\nfrom databus.database.json_db.json_database_arguments import JsonDatabaseArguments\nfrom databus.database.json_db.json_path_builder import JsonPathBuilder\n\n\nclass JsonLog:\n \"\"\" JSON log class \"\"\"\n def __init__(self, args: JsonDatabaseArguments):\n self._args = args\n self._client = JsonClient(args)\n\n def build_log_file_name(self, p_log: Log) -> str:\n \"\"\" Builds log file name \"\"\"\n datetime_part = p_log.creation_datetime.isoformat()\n safe_datetime_part = datetime_part.replace(\":\", \"_\")\n guid_part = str(p_log.guid)\n return safe_datetime_part + \"_\" + guid_part + \".\" + self._args.log_extension\n\n def build_log_file_path(self, p_client_id: str, p_log: Log) -> str:\n \"\"\" Builds log file path \"\"\"\n return path.join(self.get_root_path(p_client_id),\n self.build_log_file_name(p_log))\n\n def delete_log_file_before(self, p_client_id: str, p_before: datetime, p_log: Log):\n \"\"\" Deletes log files before the given date \"\"\"\n log_root_path = self.get_root_path(p_client_id)\n all_log_files = self.get_log_file_list(p_client_id)\n for log_file in all_log_files:\n split1 = log_file.split(\"T\")\n split2 = split1[0].split(\"-\")\n log_file_date = datetime(year=int(split2[0]), month=int(split2[1]), day=int(split2[2]))\n if log_file_date < p_before:\n full_log_file_path = path.join(log_root_path, log_file)\n p_log.append_text(\"Deleting \" + full_log_file_path)\n remove(full_log_file_path)\n\n def get_log_file_content(self, p_client_id: str, p_log_file: str) -> str:\n \"\"\" Returns the content of the given log file \"\"\"\n output = \"\"\n log_path = path.join(self.get_root_path(p_client_id), p_log_file)\n with open(log_path, mode=\"r\") as log_file:\n output = log_file.read()\n return output\n\n def get_log_file_list(self, p_client_id: str) -> List[str]:\n \"\"\" Log file list \"\"\"\n output = []\n log_root_path = self.get_root_path(p_client_id)\n file_list = [f.name for f in scandir(log_root_path) if f.is_file()]\n supposed_extension = 
self._args.log_extension.lower()\n for file_candidate in file_list:\n extension = path.splitext(file_candidate)[1].replace(\".\", \"\").lower()\n if extension == supposed_extension:\n output.append(file_candidate)\n return output\n\n def get_root_path(self, p_client_id: str) -> str:\n \"\"\" Returns the root log path for the given client \"\"\"\n return self._get_path_builder(p_client_id).log_root_path\n\n def insert(self, p_client_id: str, p_log: Log):\n \"\"\" Writes log file to disk \"\"\"\n log_file_content = p_log.entries_as_string\n log_file_path = self.build_log_file_path(p_client_id, p_log)\n\n log_file = open(log_file_path, \"w+\")\n log_file.write(log_file_content)\n log_file.close()\n\n def _get_path_builder(self, p_client_id: str) -> JsonPathBuilder:\n return JsonPathBuilder(p_client_id, self._args)\n", "sub_path": "databus/database/json_db/json_log.py", "file_name": "json_log.py", "file_ext": "py", "file_size_in_byte": 3318, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "databus.database.json_db.json_database_arguments.JsonDatabaseArguments", "line_number": 13, "usage_type": "name"}, {"api_name": "databus.database.json_db.json_client.JsonClient", "line_number": 15, "usage_type": "call"}, {"api_name": "databus.client.log.Log", "line_number": 17, "usage_type": "name"}, {"api_name": "databus.client.log.Log", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 29, "usage_type": "name"}, {"api_name": "databus.client.log.Log", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "name"}, {"api_name": "os.remove", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "name"}, {"api_name": "os.scandir", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 50, "usage_type": "name"}, {"api_name": "databus.client.log.Log", "line_number": 66, "usage_type": "name"}, {"api_name": "databus.database.json_db.json_path_builder.JsonPathBuilder", "line_number": 76, "usage_type": "call"}, {"api_name": "databus.database.json_db.json_path_builder.JsonPathBuilder", "line_number": 75, "usage_type": "name"}]} +{"seq_id": "292635731", "text": "from pprint import pprint\nfrom markdown import Markdown\nfrom markdownify import markdownify\nimport re\n\nwith open('README.md', 'r') as fh:\n data = fh.read()\nmd = Markdown(extensions=['toc'])\nmd.convert(data)\n\ntoc = markdownify(md.toc, bullets=\"***\")\ntoc_lines = toc.replace('\\t', ' ').split('\\n')\ntoc_lines = toc_lines[1:]\noutput = []\nfor line in toc_lines:\n line = re.sub('^\\s{3}', '', line)\n if line == '':\n continue\n output.append(line)\n\nprint('\\n')\nprint('\\n'.join(output))\nprint('\\n')\n", "sub_path": "assets/scripts/toc.py", "file_name": "toc.py", "file_ext": "py", "file_size_in_byte": 561, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": 
"markdown.Markdown", "line_number": 8, "usage_type": "call"}, {"api_name": "markdownify.markdownify", "line_number": 11, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "352966313", "text": "# Author: Amy Tipple, altipple@gmail.com, 2017/06/13\r\n\r\n\"\"\"From a random sample of points retrieve routes\r\n from the Google Directions routing API.\r\n\"\"\"\r\n\r\nimport fiona\r\nimport json\r\nimport os\r\nimport polyline\r\nimport re\r\nimport requests\r\nfrom shapely.geometry import mapping, LineString\r\nimport time\r\n\r\nprjdetails = \"GEOGCS[\\\"GCS_WGS_1984\\\",DATUM[\\\"D_WGS_1984\\\",SPHEROID[\\\"WGS_1984\\\",6378137.0,298.257223563]],PRIMEM[\\\"Greenwich\\\",0.0],UNIT[\\\"Degree\\\",0.0174532925199433]]\"\r\noutpath = \"outpath\"\r\nshppath = \"pathtoshp\"\r\ngoogout = outpath + \"/\" + \"GoogleOut.shp\"\r\ngoogprj = outpath + \"/\" + \"GoogleOut.prj\"\r\nsample = shppath + \"/\" + \"Sample.shp\"\r\nhqshp = shppath + \"/\" + \"HQ.shp\"\r\n\r\n# Nissan HQ LatLon\r\nhq = []\r\nwith fiona.open(hqshp) as src:\r\n hq.append((src[0]['geometry']['coordinates']))\r\nHQLL = str(hq[0][1]) + \",\" + str(hq[0][0])\r\n\r\ndef gRoute(start,end):\r\n \"\"\"Google routing api return route from A to B\"\"\"\r\n base = 'https://maps.googleapis.com/maps/api/directions/json?origin='\r\n dest = '&destination='\r\n key = '&key=YOURKEY'\r\n req = base + start + dest + end + key\r\n r = requests.get(req)\r\n return r.text\r\n\r\ndef getseconds(TX):\r\n \"\"\"Convert time from hours and minutes to seconds\r\n The time could be just hours or just minutes or both\"\"\"\r\n mn = []\r\n hr = []\r\n totaltime = []\r\n if re.search(\"hour|hours\",TX):\r\n hr.append(TX.split(\" hour\")[0])\r\n if re.search(\"min|mins\",TX):\r\n m = (TX.split(\" min\")[0]).split(\" \")[-1]\r\n mn.append((TX.split(\" min\")[0]).split(\" \")[-1])\r\n if len(hr) != 0:\r\n h = int(hr[0]) * 3600\r\n totaltime.append(h)\r\n if len(mn) != 0:\r\n m = int(mn[0]) * 60\r\n totaltime.append(m) \r\n ttime = sum(totaltime)\r\n return ttime\r\n\r\ndef gparse(textfile):\r\n \"\"\"parse route from Google api response\"\"\"\r\n with open(textfile,'r') as jj:\r\n x = json.load(jj)\r\n d = x['routes'][0]['legs'][0]['distance']['text'].replace(\" mi\",\"\")\r\n dd = d.replace(\" ft\",\"\")\r\n dist = float(dd) * 1.60934\r\n t = x['routes'][0]['legs'][0]['duration']['text']\r\n TStime = getseconds(t)\r\n pl = [polyline.decode(p['polyline']['points']) for p in x['routes'][0]['legs'][0]['steps']]\r\n pline = [] \r\n for p in pl:\r\n for pp in p:\r\n # swap latlon to lonlat in each tuple\r\n pline.append((pp[1],pp[0]))\r\n return [pline,dist,TStime]\r\n\r\n# Output shape file schema\r\nschema = {\r\n 'geometry':'LineString',\r\n 'properties':{\r\n 'Name':'str',\r\n 'Distance':'float',\r\n 'Time':'float'\r\n },\r\n }\r\n\r\n\"\"\"\r\n# write to out shape, go through each line of the sample file for the\r\n# end lat lon and name\r\n# submit route from HQ to end lat lon for each point in sample\r\n# return json response from api\r\n# parse api for all points to make a polyline of the route and distance and time\r\n# output results to shapefile\r\n# output each response to a text file for later use of details\r\n\"\"\"\r\n\r\nwith fiona.open(googout,'w','ESRI Shapefile',schema) as c:\r\n with fiona.open(sample) as sam:\r\n for i,line in enumerate(sam):\r\n LL = (sam[i]['geometry']['coordinates'])\r\n samLon = LL[0]\r\n samLat = LL[1]\r\n properties = list(sam[i]['properties'].items())\r\n name = 
properties[2][1]\r\n destination = str(samLat) + \",\" + str(samLon)\r\n # write response to text file for safe keeping\r\n # parse to shape (from json)\r\n fpath = outpath + \"/G_\" + str(i) + name + \".txt\"\r\n if os.path.exists(fpath):\r\n parse = gparse(fpath)\r\n if len(parse[0]) < 2:\r\n pass\r\n else:\r\n c.write({\r\n 'geometry': mapping(LineString(parse[0])),\r\n 'properties':{\r\n 'Name':name,\r\n 'Distance':parse[1],\r\n 'Time':parse[2]\r\n }\r\n })\r\n else:\r\n ap = gRoute(HQLL,destination)\r\n with open(fpath,'w') as fp_out:\r\n fp_out.write(ap)\r\n parse = gparse(fpath)\r\n if len(parse[0]) < 2:\r\n pass\r\n else:\r\n c.write({\r\n 'geometry': mapping(LineString(parse[0])),\r\n 'properties':{\r\n 'Name':name,\r\n 'Distance':parse[1],\r\n 'Time':parse[2]\r\n }\r\n }) \r\n time.sleep(5)\r\n print (i,name)\r\n\r\n \r\n# write prj file\r\nwith open(googprj,\"w\") as fp_prj:\r\n fp_prj.write(prjdetails)\r\n\r\nprint (\"Complete\")\r\n", "sub_path": "RoutingAPIsOpen/readshpGoogle.py", "file_name": "readshpGoogle.py", "file_ext": "py", "file_size_in_byte": 4980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "fiona.open", "line_number": 26, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 36, "usage_type": "call"}, {"api_name": "re.search", "line_number": 45, "usage_type": "call"}, {"api_name": "re.search", "line_number": 47, "usage_type": "call"}, {"api_name": "json.load", "line_number": 62, "usage_type": "call"}, {"api_name": "polyline.decode", "line_number": 68, "usage_type": "call"}, {"api_name": "fiona.open", "line_number": 96, "usage_type": "call"}, {"api_name": "fiona.open", "line_number": 97, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "shapely.geometry.mapping", "line_number": 114, "usage_type": "call"}, {"api_name": "shapely.geometry.LineString", "line_number": 114, "usage_type": "call"}, {"api_name": "shapely.geometry.mapping", "line_number": 130, "usage_type": "call"}, {"api_name": "shapely.geometry.LineString", "line_number": 130, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "522251716", "text": "\"\"\"\n$description TikTok is a short-form video hosting service owned by ByteDance\n$url www.tiktok.com\n$type live\n\"\"\"\nimport re\nimport logging\nimport streamlink.plugin\nimport streamlink.stream\n\nfrom streamlink.exceptions import NoStreamsError, PluginError\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.http import HTTPStream\nfrom streamlink.stream.hls import HLSStream, HLSStreamReader, HLSStreamWorker, HLSStreamWriter\nfrom streamlink.stream.hls_playlist import M3U8, ByteRange, DateRange, ExtInf, Key, M3U8Parser, Map, load as load_hls_playlist\nimport requests\n\nlog = logging.getLogger(__name__)\n\n\n_post_schema = validate.Schema(\n {\n \"cam\": validate.Schema({\n 'streamName' : str,\n 'viewServers': validate.Schema({'flashphoner-hls': str})\n }),\n \"user\": validate.Schema({\n 'user' : validate.Schema({\n 'status' : str,\n 'isLive' : bool\n })\n }) \n }\n)\n\n@pluginmatcher(\n pattern=re.compile(\n r\"https?://(\\w+\\.)?stripchat\\.com/(?P[a-zA-Z0-9_-]+)\"),\n)\nclass TikTok(Plugin):\n url_re = re.compile(\n r\"https?://(\\w+\\.)?stripchat\\.com/(?P[a-zA-Z0-9_-]+)$\")\n\n def 
_get_streams(self):\n\n match = self.url_re.match(self.url)\n username = match.group(\"username\")\n api_call = \"https://stripchat.com/api/front/v2/models/username/{0}/cam\".format(username)\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Referer\": self.url,\n }\n\n res = self.session.http.get(api_call, headers=headers)\n data = self.session.http.json(res, schema=_post_schema)\n\n server = \"https://b-{0}.doppiocdn.com/hls/{1}/master_{1}.m3u8\".format(data[\"cam\"][\"viewServers\"][\"flashphoner-hls\"],data[\"cam\"][\"streamName\"])\n server0 = \"https://b-{0}.doppiocdn.com/hls/{1}/{1}.m3u8\".format(data[\"cam\"][\"viewServers\"][\"flashphoner-hls\"],data[\"cam\"][\"streamName\"])\n self.logger.info(\"Stream status: {0}\".format(data[\"user\"][\"user\"][\"status\"]))\n\n if (data[\"user\"][\"user\"][\"isLive\"] is True and data[\"user\"][\"user\"][\"status\"] == \"public\"):\n try:\n for s in HLSStream.parse_variant_playlist(self.session,server,headers={'Referer': self.url}).items():\n yield s\n except IOError:\n # fall back to the plain (non-variant) playlist\n stream = HLSStream(self.session, server0)\n yield \"Auto\", stream\n\n__plugin__ = Stripchat\n", "sub_path": "stripchat.py", "file_name": "stripchat.py", "file_ext": "py", "file_size_in_byte": 2660, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 19, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate.Schema", "line_number": 22, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate", "line_number": 22, "usage_type": "name"}, {"api_name": "streamlink.plugin.api.validate.Schema", "line_number": 24, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate", "line_number": 24, "usage_type": "name"}, {"api_name": "streamlink.plugin.api.validate.Schema", "line_number": 26, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate", "line_number": 26, "usage_type": "name"}, {"api_name": "streamlink.plugin.api.validate.Schema", "line_number": 28, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate", "line_number": 28, "usage_type": "name"}, {"api_name": "streamlink.plugin.api.validate.Schema", "line_number": 29, "usage_type": "call"}, {"api_name": "streamlink.plugin.api.validate", "line_number": 29, "usage_type": "name"}, {"api_name": "streamlink.plugin.Plugin", "line_number": 41, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlink.stream.hls.HLSStream.parse_variant_playlist", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlink.stream.hls.HLSStream", "line_number": 65, "usage_type": "name"}, {"api_name": "streamlink.stream.hls.HLSStream", "line_number": 68, "usage_type": "call"}, {"api_name": "streamlink.plugin.pluginmatcher", "line_number": 37, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 38, "usage_type": "call"}]} +{"seq_id": "5843014", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n'Six degrees of separation on Wikipedia, backed by MySQL'\n\n# Design a database with two tables that store pages and links separately; both tables carry a creation timestamp and their own auto-increment ID\n# CREATE DATABASE wikipedia;\n# CREATE TABLE `wikipedia`.`pages`(`id` INT NOT NULL AUTO_INCREMENT,`url` VARCHAR(255) NOT NULL,`created` TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,PRIMARY KEY(`id`));\n# CREATE TABLE `wikipedia`.`links`(`id` INT NOT NULL AUTO_INCREMENT,`fromPageId` INT NULL,`toPageId` INT NULL,`created` TIMESTAMP NOT NULL DEFAULT 
CURRENT_TIMESTAMP,PRIMARY KEY(`id`));\n\nfrom bs4 import BeautifulSoup\nimport pymysql\nimport re\nfrom urllib.request import urlopen\n\nconn=pymysql.connect(host='127.0.0.1',user='root',passwd='root',db='mysql',charset='utf8')\ncur=conn.cursor()\ncur.execute(\"USE wikipedia\")\n\ndef insertPageIfNotExists(url):\n cur.execute(\"SELECT id FROM pages where url=%s\",(url))\n if cur.rowcount==0:\n cur.execute(\"INSERT INTO pages (url) VALUES(%s)\",(url))\n conn.commit()\n return cur.lastrowid\n else:\n return cur.fetchone()[0]\n\ndef insertLink(fromPageId,toPageId):\n cur.execute(\"SELECT id FROM links where fromPageId=%s and toPageId=%s\",(int(fromPageId),int(toPageId)))\n if cur.rowcount==0:\n cur.execute(\"INSERT INTO links (fromPageId,toPageId) VALUES(%s,%s)\",(int(fromPageId),int(toPageId)))\n conn.commit()\n\npages=set()\ndef getLinks(pageUrl,recursionLevel):\n global pages\n # Once recursionLevel reaches 5 the function returns without recursing further; this cap keeps the crawl from growing until it exhausts memory or the call stack.\n if recursionLevel>4:\n return\n pageId=insertPageIfNotExists(pageUrl)\n html=urlopen(\"http://en.wikipedia.org\"+pageUrl)\n bsObj=BeautifulSoup(html,'html.parser')\n # Using single quotes around the compile pattern raised: AttributeError: 'NavigableString' object has no attribute 'attrs'\n for link in bsObj.findAll('a',href=re.compile(\"^(/wiki/)((?!:).)*$\")):\n insertLink(pageId,insertPageIfNotExists(link.attrs['href']))\n if link.attrs['href'] not in pages:\n # A page we have not seen before: add it to the set and crawl the article links inside it\n newPage=link.attrs['href']\n pages.add(newPage)\n getLinks(newPage,recursionLevel+1)\n \ngetLinks(\"/wiki/Kevin_Bacon\",0)\ncur.close()\nconn.close()\n\n\n\n", "sub_path": "python-scraping/05存储数据/wikipage_links.py", "file_name": "wikipage_links.py", "file_ext": "py", "file_size_in_byte": 2378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pymysql.connect", "line_number": 16, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 42, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "218585320", "text": "import torch\nfrom skimage import io, transform\nimport numpy as np\nfrom torch.utils.data import Dataset, DataLoader\nimport matplotlib.pyplot as plt\n\nexperiment = \"datasets/split/60.csv\"\n\nclass PartialDataset(Dataset):\n def __init__(self, mnist_dir, pre_selection_dir):\n self.examples = np.loadtxt(mnist_dir, delimiter=\",\")[:5000]\n self.pre_selections = np.load(pre_selection_dir)\n print(self.pre_selections.shape)\n def __len__(self):\n assert self.examples.shape[0] == self.pre_selections.shape[0]\n return self.examples.shape[0]\n def __getitem__(self, item): # item = index\n input_img = self.examples[item, 1:].reshape(28, 28) / 255.\n unmasked = np.copy(input_img)\n input_img[9:19, 9:19] = 0.5\n pre_sele_img = self.pre_selections[item]\n #plt.figure()\n #plt.imshow(np.squeeze(pre_sele_img))\n #plt.show()\n unmasked = transform.resize(unmasked, (64, 64), preserve_range=True)\n input_img = transform.resize(input_img, (64, 64), preserve_range=True)\n pre_sele_img = transform.resize(pre_sele_img, (64, 64), preserve_range=True)\n unmasked = torch.from_numpy(np.expand_dims(unmasked, 0))\n input_img = torch.from_numpy(np.expand_dims(input_img, 0))\n pre_sele_img = torch.from_numpy(np.expand_dims(pre_sele_img, 0))\n\n example = {\n 'input': input_img, \n 'pre_sele': pre_sele_img,\n 'unmasked': unmasked\n }\n\n return example\n\n\n\n\nif __name__ == '__main__':\n dataset = 
PartialDataset(experiment, 'datasets/split/preselection_top8_60.npy') # the constructor takes both the MNIST csv (experiment, defined above) and the pre-selection file\n\n    print(len(dataset))\n    print(dataset[0]['input'])\n    print(dataset[0]['pre_sele'])\n", "sub_path": "data_loader.py", "file_name": "data_loader.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.loadtxt", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 19, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 25, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 26, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 26, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 27, "usage_type": "call"}, {"api_name": "skimage.transform", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.from_numpy", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "478950124", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Notebook Tab Drag and Drop Example (GTK+3 & Python3)\n#\n# Requirements:\n# * Python (>=3.5)\n# https://www.python.org/\n#\n# * GTK+ (>=3.0)\n# https://www.gtk.org/\n#\n# Dependencies:\n# * PyGObject\n# https://pygobject.readthedocs.io/en/latest/\n#\n# References:\n# * GtkNotebook: GTK+ 3 Reference Manual\n# https://developer.gnome.org/gtk3/stable/GtkNotebook.html#gtk-notebook-set-tab-detachable\n#\n# * tests/testnotebookdnd.c - master - GNOME / gtk\n# https://gitlab.gnome.org/GNOME/gtk/blob/master/tests/testnotebookdnd.c\n#\n# * Drag and drop (DND) of gtk.Notebook tab to another widget\n# http://python.6.x6.nabble.com/Drag-and-drop-DND-of-gtk-Notebook-tab-to-another-widget-td1948624.html\n#\n# License: CC0 1.0\n# see https://creativecommons.org/publicdomain/zero/1.0/legalcode.txt\n\nimport gi\n\ngi.require_version(\"Gdk\", \"3.0\")\ngi.require_version(\"Gtk\", \"3.0\")\n\nfrom gi.repository import Gdk, Gio, Gtk\n\n\ndef main():\n app = Gtk.Application.new(\"com.github.ma8ma.test\", Gio.ApplicationFlags.FLAGS_NONE)\n app.connect(\"activate\", on_activate)\n app.run([])\n\n\ndef on_activate(app):\n box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)\n\n notebook = Gtk.Notebook.new()\n notebook.connect_after(\"drag-begin\", _sig_notebook_drag_begin)\n notebook.connect_after(\"drag-data-get\", _sig_notebook_drag_data_get)\n box.pack_start(notebook, True, True, 0)\n\n label = Gtk.Label.new(\"Drag me!\")\n child_widget = Gtk.Entry.new()\n notebook.append_page(child_widget, label)\n notebook.set_tab_detachable(child_widget, True) # Enable DnD\n\n button = Gtk.Button.new_with_label(\"Drop here!\")\n # Gdk.DragAction *must* be MOVE. 
(GTK+ 3.18)\n button.drag_dest_set(\n Gtk.DestDefaults.HIGHLIGHT | Gtk.DestDefaults.DROP | Gtk.DestDefaults.MOTION,\n [Gtk.TargetEntry.new(\"GTK_NOTEBOOK_TAB\", Gtk.TargetFlags.SAME_APP, 0)],\n Gdk.DragAction.MOVE,\n )\n button.connect(\"drag-drop\", _sig_drag_drop)\n button.connect_after(\"drag-data-received\", _sig_drag_data_received)\n box.pack_start(button, True, True, 0)\n\n window = Gtk.ApplicationWindow.new(app)\n window.set_default_size(300, 300)\n window.set_title(\"Notebook Tab DnD Example\")\n window.add(box)\n window.show_all()\n\n\ndrag_page_number = 0\n\n\ndef _sig_notebook_drag_begin(widget, context):\n global drag_page_number\n drag_page_number = widget.get_current_page()\n print(\"drag-begin:\", drag_page_number, widget)\n\n\ndef _sig_notebook_drag_data_get(widget, context, selection, info, timestamp):\n print(\"drag-data-get:\", drag_page_number, selection.get_target())\n selection.set(selection.get_target(), 8, b\"%d\" % (drag_page_number,))\n\n\ndef _sig_drag_drop(widget, context, x, y, timestamp):\n print(\"drag-drop:\", widget)\n if \"GTK_NOTEBOOK_TAB\" in context.list_targets():\n widget.drag_get_data(context, \"GTK_NOTEBOOK_TAB\")\n context.finish(True, False, timestamp)\n return True\n\n\ndef _sig_drag_data_received(widget, context, x, y, selection, info, timestamp):\n print(\"drag-data-received:\", selection.get_data())\n src_widget = Gtk.drag_get_source_widget(context)\n the_page_number = int(selection.get_data())\n child_widget = src_widget.get_nth_page(the_page_number)\n child_widget.set_text(\"Thank you!\")\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "letsgo/dnd.py", "file_name": "dnd.py", "file_ext": "py", "file_size_in_byte": 3314, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "gi.require_version", "line_number": 32, "usage_type": "call"}, {"api_name": "gi.require_version", "line_number": 33, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Application.new", "line_number": 39, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Application", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 39, "usage_type": "name"}, {"api_name": "gi.repository.Gio.ApplicationFlags", "line_number": 39, "usage_type": "attribute"}, {"api_name": "gi.repository.Gio", "line_number": 39, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Box.new", "line_number": 45, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Box", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 45, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Orientation", "line_number": 45, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk.Notebook.new", "line_number": 47, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Notebook", "line_number": 47, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 47, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Label.new", "line_number": 52, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Label", "line_number": 52, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 52, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Entry.new", "line_number": 53, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Entry", "line_number": 53, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 53, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.Button.new_with_label", 
"line_number": 57, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.Button", "line_number": 57, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 57, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.DestDefaults", "line_number": 60, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 60, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.TargetEntry.new", "line_number": 61, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.TargetEntry", "line_number": 61, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 61, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.TargetFlags", "line_number": 61, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk.DragAction", "line_number": 62, "usage_type": "attribute"}, {"api_name": "gi.repository.Gdk", "line_number": 62, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.ApplicationWindow.new", "line_number": 68, "usage_type": "call"}, {"api_name": "gi.repository.Gtk.ApplicationWindow", "line_number": 68, "usage_type": "attribute"}, {"api_name": "gi.repository.Gtk", "line_number": 68, "usage_type": "name"}, {"api_name": "gi.repository.Gtk.drag_get_source_widget", "line_number": 99, "usage_type": "call"}, {"api_name": "gi.repository.Gtk", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "37593740", "text": "\"\"\" Protocol tests \"\"\"\n\nimport unittest\nfrom json import JSONEncoder, JSONDecoder\nfrom tests import common\n\nfrom pynpoint import config, protocol, redis\nfrom pynpoint.errors import ProtocolError, RequestError\nfrom pynpoint.mixins import Mixin\nfrom pynpoint.protocol import handlers\nfrom pynpoint.protocol.handlers import announcement, export, query\n\n\nclass ProtocolTestCase(unittest.TestCase):\n \"\"\" Protocol tests \"\"\"\n\n def setUp(self):\n self.packet = protocol.Packet('test', {'test': 'data'})\n self.packet_dup = protocol.Packet('test', {'test': 'data'})\n\n def test_packet_encode(self):\n self.assertEqual('pnpt|\\x01|\\x00\\x00\\x00\\x15|test\\n{\"test\": \"data\"}!!',\n self.packet.encode())\n\n def test_packet_decode(self):\n decoder = protocol.Packet.decode\n self.assertEqual(self.packet, decoder(self.packet.encode()))\n\n bad_packets = ['pnpt|\\x01|\\x00\\x00\\x00|test\\n{\"test\": \"data\"}!!',\n 'pnpt|foo']\n\n for bad_packet in bad_packets:\n self.assertRaises(protocol.ProtocolError, decoder, bad_packet)\n\n def test_packet_equality(self):\n self.assertTrue(self.packet == self.packet_dup)\n self.assertFalse(self.packet is self.packet_dup)\n self.assertFalse(self.packet == None)\n\n\n\nclass ProtocolHandlersTestCase(unittest.TestCase):\n \"\"\" Protocol Handler Test cases \"\"\"\n\n def setUp(self):\n common.reset_config()\n config.Config()\n\n\n self.test_host = {\"host\": \"127.0.0.1\", \n \"port\": 9999,\n \"addresses\": [\"123.45.67.8\"]}\n\n r = redis.Redis()\n r.setex('host_123.45.67.8', JSONEncoder().encode(self.test_host), 10)\n\n\n self.announcement = protocol.Packet('hi!', {'host': '123.45.67.89'})\n self.export = protocol.Packet('i have', {'host': '123.45.67.89', \n \"type\": 'addresses',\n 'export':[\"123.45.67.89\"]})\n self.query = protocol.Packet(\"heard of?\", {\"type\": 'addresses',\n \"value\": '123.45.67.8'})\n\n def bad_data(self, request_type):\n bd = [None, [], \"\", {}]\n return [protocol.Packet(request_type, d) for d in bd]\n\n def test_handle_export(self):\n handler = protocol.handle_packet\n self.assertTrue(handler(self.export))\n\n # Validator raises on invalid payload\n for 
bad_packet in self.bad_data('i have'):\n self.assertRaises(RequestError, handler, bad_packet)\n\n def test_handle_announcement(self):\n handler = protocol.handle_packet\n self.assertTrue(handler(self.announcement))\n\n\n # Validator raises on invalid payload\n for bad_packet in self.bad_data('hi!'):\n self.assertRaises(RequestError, handler, bad_packet)\n\n def test_handle_query(self):\n handler = protocol.handle_packet\n self.assertTrue(handler(self.query))\n self.assertEqual(handler(self.query), [self.test_host])\n\n # Validator raises on invalid payload\n for bad_packet in self.bad_data('heard of?'):\n self.assertRaises(RequestError, handler, bad_packet)\n", "sub_path": "tests/protocol_unittest.py", "file_name": "protocol_unittest.py", "file_ext": "py", "file_size_in_byte": 3235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "unittest.TestCase", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 18, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 18, "usage_type": "name"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 19, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 19, "usage_type": "name"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 26, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol", "line_number": 26, "usage_type": "name"}, {"api_name": "pynpoint.protocol.ProtocolError", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol", "line_number": 33, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 42, "usage_type": "attribute"}, {"api_name": "tests.common.reset_config", "line_number": 46, "usage_type": "call"}, {"api_name": "tests.common", "line_number": 46, "usage_type": "name"}, {"api_name": "pynpoint.config.Config", "line_number": 47, "usage_type": "call"}, {"api_name": "pynpoint.config", "line_number": 47, "usage_type": "name"}, {"api_name": "pynpoint.redis.Redis", "line_number": 54, "usage_type": "call"}, {"api_name": "pynpoint.redis", "line_number": 54, "usage_type": "name"}, {"api_name": "json.JSONEncoder", "line_number": 55, "usage_type": "call"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 58, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 58, "usage_type": "name"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 59, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 59, "usage_type": "name"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 62, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 62, "usage_type": "name"}, {"api_name": "pynpoint.protocol.Packet", "line_number": 67, "usage_type": "call"}, {"api_name": "pynpoint.protocol", "line_number": 67, "usage_type": "name"}, {"api_name": "pynpoint.protocol.handle_packet", "line_number": 70, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol", "line_number": 70, "usage_type": "name"}, {"api_name": "pynpoint.errors.RequestError", "line_number": 75, "usage_type": "argument"}, {"api_name": "pynpoint.protocol.handle_packet", "line_number": 78, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol", "line_number": 78, "usage_type": "name"}, {"api_name": "pynpoint.errors.RequestError", "line_number": 84, "usage_type": "argument"}, {"api_name": "pynpoint.protocol.handle_packet", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pynpoint.protocol", 
"line_number": 87, "usage_type": "name"}, {"api_name": "pynpoint.errors.RequestError", "line_number": 93, "usage_type": "argument"}]} +{"seq_id": "568097488", "text": "import numpy as np\nimport cv2\n\n# Identify pixels above the threshold\n# Threshold of RGB > 160 does a nice job of identifying ground pixels only\ndef color_thresh(img, rgb_thresh=(160, 160, 160)):\n # Create an array of zeros same xy size as img, but single channel\n color_select = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] > rgb_thresh[2])\n # Index the array of zeros with the boolean array and set to 1\n color_select[above_thresh] = 255\n # Return the binary image\n return color_select\n\n# Function for detecting rocks input: image, output: rocks masked\ndef detect_rock(img):\n hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n # define range of yellow color in HSV\n lower_yellow = np.array([80,100,100])\n upper_yellow = np.array([100,255,255])\n\n # Threshold the HSV image to get only yellow colors\n mask = cv2.inRange(hsv, lower_yellow, upper_yellow)\n \n return mask\n\n# Detecting obstacles function input: terrian, output: obstacle\ndef detect_obstacle(img):\n kernel = np.ones((5,5))\n #terrain = color_thresh(img)\n obstacle = cv2.dilate(img,kernel,iterations=1) \n obstacle = cv2.bitwise_and(obstacle,cv2.bitwise_not(img))\n return obstacle\n\n# Define a function to convert to rover-centric coordinates\ndef rover_coords(binary_img):\n # Identify nonzero pixels\n ypos, xpos = binary_img.nonzero()\n # Calculate pixel positions with reference to the rover position being at the \n # center bottom of the image. 
\n x_pixel = np.absolute(ypos - binary_img.shape[0]).astype(np.float)\n # measure y from the horizontal centre of the image so left and right are symmetric\n y_pixel = -(xpos - binary_img.shape[1]/2).astype(np.float)\n return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n # Convert (x_pixel, y_pixel) to (distance, angle) \n # in polar coordinates in rover space\n # Calculate distance to each pixel\n dist = np.sqrt(x_pixel**2 + y_pixel**2)\n # Calculate angle away from vertical for each pixel\n angles = np.arctan2(y_pixel, x_pixel)\n return dist, angles\n\n# Define a function to apply a rotation to pixel positions\ndef rotate_pix(xpix, ypix, yaw):\n # Convert yaw to radians\n yaw_rad = (yaw*np.pi)/180\n # Apply the standard 2D rotation matrix (the second term of y needs cos, not sin)\n xpix_rotated = np.cos(yaw_rad)*xpix - np.sin(yaw_rad)*ypix\n ypix_rotated = np.sin(yaw_rad)*xpix + np.cos(yaw_rad)*ypix\n # Return the result\n return xpix_rotated, ypix_rotated\n\n# Define a function to perform a translation\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n # Apply a scaling and a translation\n xpix_translated = xpos + xpix_rot/scale\n ypix_translated = ypos + ypix_rot/scale\n # Return the result\n return xpix_translated, ypix_translated\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n # Apply rotation\n xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n # Apply translation\n xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n # Perform rotation, translation and clipping all at once\n x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n # Return the result\n return x_pix_world, y_pix_world\n\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n \n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n \n return warped\n\n\n# Apply the above functions in succession and update the Rover state accordingly\ndef perception_step(Rover):\n # Perform perception steps to update Rover()\n # NOTE: camera image is coming to you in Rover.img\n\n #some constants\n bottom_offset = 6\n dst_size = 5\n scale = 10\n #Add mask to limit the viewable area\n vertices = np.array([[(0,0),(Rover.img.shape[1],0),(Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - bottom_offset),(Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - bottom_offset)]],dtype=np.int32)\n mask = np.zeros_like(Rover.img[:,:,0])\n cv2.fillPoly(mask, vertices, 255)\n #kernel for morphologyEx function\n kernel = np.ones((5,5))\n # 1) Define source and destination points for perspective transform\n source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\n destination = np.float32([[Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - bottom_offset],\n [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - bottom_offset],\n [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset],\n [Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset],\n ])\n \n # 2) Apply perspective transform\n warped = perspect_transform(Rover.img,source,destination)\n\n # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples\n #detect terrain\n terrain = color_thresh(warped)\n #this line for smoothing 
terrain\n terrain = cv2.morphologyEx(terrain,cv2.MORPH_OPEN,kernel,iterations=1)\n #terrain = cv2.morphologyEx(terrain,cv2.MORPH_CLOSE,kernel,iterations=1)\n #detect obstacles\n obstacle = detect_obstacle(terrain)\n #detect rocks\n rock = detect_rock(warped)\n\n #mask terrain and obstacles\n obstacle = cv2.bitwise_and(mask,obstacle)\n terrain = cv2.bitwise_and(mask,terrain)\n # 4) Update Rover.vision_image (this will be displayed on left side of screen)\n # Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image\n # Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image\n # Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image\n Rover.vision_image[:,:,0] = obstacle\n Rover.vision_image[:,:,1] = rock\n Rover.vision_image[:,:,2] = terrain\n\n # 5) Convert map image pixel values to rover-centric coords\n xpix_o,ypix_o = rover_coords(Rover.vision_image[:,:,0])\n xpix_r,ypix_r = rover_coords(Rover.vision_image[:,:,1])\n xpix_t,ypix_t = rover_coords(Rover.vision_image[:,:,2])\n # 6) Convert rover-centric pixel values to world coordinates\n x_pix_o_world,y_pix_o_world = pix_to_world(xpix_o,ypix_o,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],10)\n x_pix_r_world,y_pix_r_world = pix_to_world(xpix_r,ypix_r,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],10)\n x_pix_t_world,y_pix_t_world = pix_to_world(xpix_t,ypix_t,Rover.pos[0],Rover.pos[1],Rover.yaw,Rover.worldmap.shape[0],10)\n # 7) Update Rover worldmap (to be displayed on right side of screen)\n # channel 0 = obstacle, channel 1 = rock, channel 2 = navigable terrain,\n # matching the vision_image channel layout above\n # only map while the rover is roughly level, otherwise the perspective transform is unreliable\n if Rover.pitch <= 0.75 or Rover.pitch >= 359.25:\n if Rover.roll <= 1 or Rover.roll >= 359:\n Rover.worldmap[y_pix_o_world, x_pix_o_world, 0] += 1\n Rover.worldmap[y_pix_r_world, x_pix_r_world, 1] += 1\n Rover.worldmap[y_pix_t_world, x_pix_t_world, 2] += 1\n # 8) Convert rover-centric pixel positions to polar coordinates\n # Update Rover pixel distances and angles\n Rover.nav_dists, Rover.nav_angles = to_polar_coords(xpix_t,ypix_t)\n \n \n \n \n return Rover", "sub_path": "RoboND-Rover-Project/code/perception.py", "file_name": "perception.py", "file_ext": "py", "file_size_in_byte": 7943, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.zeros_like", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 25, "usage_type": "call"}, {"api_name": "cv2.inRange", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 34, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 36, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 37, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.absolute", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.float", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 56, "usage_type": 
"call"}, {"api_name": "numpy.arctan2", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 66, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.int_", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 98, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.int32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.zeros_like", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.fillPoly", "line_number": 117, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.morphologyEx", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.MORPH_OPEN", "line_number": 135, "usage_type": "attribute"}, {"api_name": "cv2.bitwise_and", "line_number": 143, "usage_type": "call"}, {"api_name": "cv2.bitwise_and", "line_number": 144, "usage_type": "call"}]} +{"seq_id": "426159607", "text": "from sklearn import neighbors\n\ndef train_NCC_model(training_set, training_labels, number_of_neighbors):\n nnc_model = neighbors.KNeighborsClassifier(number_of_neighbors, weights=\"uniform\")\n nnc_model.fit(training_set, training_labels)\n return nnc_model\n\ndef classify_NNC(testing_set, trained_model):\n return trained_model.predict(testing_set)\n\nif __name__ == '__main__':\n training_set = [[1,2], [1,0], [0,-1] ,[-2,-1]]\n labels = [1,1,2,2]\n testing_set = [[2,0], [0,0], [-1,1] ,[-2,0]]\n number_of_neighbors = 2\n\n trained_model = train_NCC_model(training_set, labels, number_of_neighbors)\n predicted_classes = classify_NNC(testing_set, trained_model)\n print(predicted_classes)", "sub_path": "Exam_2018/Nearest_Neighbor.py", "file_name": "Nearest_Neighbor.py", "file_ext": "py", "file_size_in_byte": 708, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 4, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 4, "usage_type": "name"}]} +{"seq_id": "188994716", "text": "from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox as mb\r\nfrom datetime import datetime\r\nimport MenuPrincipal\r\n\r\n\r\nclass fb:\r\n def __init__(self):\r\n root=Tk()\r\n root.focus_force()\r\n root.title(\"Feedback\")\r\n root.geometry(\"1080x720+400+200\")\r\n root.resizable(False, False)\r\n\r\n mainframe=ttk.Frame(root, padding=\"3 3 12 12\")\r\n\r\n mainframe.grid(row=0, column=0, sticky=(N,S,E,W))\r\n\r\n mainframe.columnconfigure(0, weight=1)\r\n mainframe.rowconfigure(0, weight=1)\r\n t=datetime.now()\r\n\r\n\r\n ################################Fonction #############################\"\r\n def launch_MP(event=0):\r\n root.destroy()\r\n MenuPrincipal.MP()\r\n\r\n #affiche dans un fichier users.txt qui sera créé les 
\r\n        def envoyer(event=0):\r\n            with open('users.txt', 'a') as users:\r\n                if entr_nom.get()=='':\r\n                    users.write(\"Nom: Anonyme\"+'\\n')\r\n                else:\r\n                    users.write(\"Nom: \"+entr_nom.get()+'\\n')\r\n                    entr_nom.delete(0,END)\r\n                \r\n                users.write(\"Optimisation: \"+note_optim.get()+'\\n')\r\n                note_optim.delete(0,\"end\")\r\n                note_optim.insert(0,0)\r\n                users.write(\"Contenu: \"+note_contenu.get()+'\\n')\r\n                note_contenu.delete(0,\"end\")\r\n                note_contenu.insert(0,0)\r\n                if len(text.get(\"1.0\",END))==1:\r\n                    users.write(\"Commentaire: Aucun\"+'\\n')\r\n                else:\r\n                    users.write(\"Commentaire: \"+text.get(\"1.0\",END))\r\n                    text.delete(1.0,END)\r\n                users.write('Connecté à {}'.format(t)+'\\n'+'\\n')\r\n\r\n                mb.showinfo(\"Enquête de satisfaction\", \"Merci d'avoir laissé un commentaire\")\r\n\r\n                root.destroy()\r\n                MenuPrincipal.MP()\r\n        \r\n\r\n\r\n        ##################################################\r\n        \r\n        first=Label(mainframe, text=\"Remplis ce qui te convient pour nous aider à progresser\", font='Arial 16')\r\n        first.grid(row=0, column=1, columnspan=3, pady=10,sticky='nsew')\r\n\r\n\r\n        nom=Label(mainframe, text=\"Quel est ton nom ? :\")\r\n        nom.grid(row=2, column=0, pady=20)\r\n\r\n        entr_nom= ttk.Entry(mainframe) \r\n        entr_nom.grid(column=1, row=2, columnspan=3, padx=10, sticky=(E,W),pady=20)\r\n\r\n\r\n\r\n        optim=Label(mainframe, text=\"Trouvez-vous l'application bien optimisée (sur 10) ? \")\r\n        optim.grid(row=3, column=0, columnspan=2, pady=10, sticky='nsew')\r\n\r\n        note1= StringVar()\r\n\r\n        # the Spinbox option is 'state', not 'stat'\r\n        note_optim=Spinbox(mainframe, from_=0, to=10 ,textvariable=note1, state='readonly')\r\n        note_optim.grid(row=3, column=3, pady=10)\r\n\r\n\r\n\r\n        contenu=Label(mainframe, text=\"Trouvez-vous que les contenus des outils convenables (sur 10) ? 
\")\r\n contenu.grid(row=4, column=0, columnspan=2, pady=10, sticky='nsew')\r\n\r\n\r\n note2= StringVar()\r\n\r\n note_contenu=Spinbox(mainframe, from_=0, to=10 ,textvariable=note2, stat='readonly')\r\n note_contenu.grid(row=4, column=3, pady=10)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n com=Label(mainframe, text=\"COMMENTAIRES :\")\r\n com.grid(row=7, column=0, pady=20)\r\n\r\n #permet d'écrire un texte dans un champ qui fait un retour à la ligne pour un mot en fin de ligne\r\n\r\n text= Text(mainframe, width=50, height=3, wrap=WORD)#wrap=WORD: ne coupe pas le WORD en fin de file\r\n text.grid(row=7, column=1, padx=10, columnspan=2)\r\n\r\n\r\n\r\n send=ttk.Button(mainframe, text='Envoyer', command=envoyer)\r\n send.grid(row=8, column=2, pady=15)\r\n\r\n ttk.Button(mainframe, text= \"Revenir au Menu Principal\", command= launch_MP).grid(row=0, column=10)\r\n\r\n root.bind(\"\", envoyer)\r\n root.bind(\"\", launch_MP)\r\n\r\n\r\n root.mainloop()\r\n", "sub_path": "ToolsManager/feedback.py", "file_name": "feedback.py", "file_ext": "py", "file_size_in_byte": 3970, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tkinter.ttk.Frame", "line_number": 16, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "MenuPrincipal.MP", "line_number": 28, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 52, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 52, "usage_type": "name"}, {"api_name": "MenuPrincipal.MP", "line_number": 57, "usage_type": "call"}, {"api_name": "tkinter.ttk.Entry", "line_number": 70, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 70, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 110, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 110, "usage_type": "name"}, {"api_name": "tkinter.ttk.Button", "line_number": 113, "usage_type": "call"}, {"api_name": "tkinter.ttk", "line_number": 113, "usage_type": "name"}]} +{"seq_id": "395981984", "text": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\nfrom bitfinex import get_stats\nfrom CME import get_CME_news\nfrom bitcoin86 import get_86_news\nfrom thirty_six_kr import get_36_news\nfrom email.mime.text import MIMEText\nimport smtplib\nfrom email.utils import formataddr\nfrom db_init import mysql_init\nfrom html_generator import html_generator\n\nimport re\nimport configparser\n\n\nclass NewsRecommendation:\n def __init__(self):\n '新闻推荐类初始化。'\n print('新闻推荐类初始化。')\n # 关键字及其权重定义\n self.keywords = {\n 'cn': {'比特币': 0.4,\n '莱特币': 0.25,\n '数字货币': 0.05,\n '价格': 0.02,\n '比特币期货': 0.1,\n '比特币交易': 0.07,\n '期货': 0.1,\n '交易': 0.01},\n 'en': {'bitcoin': 0.4,\n 'litecoin': 0.25,\n 'digital currency': 0.05,\n 'price': 0.02,\n 'bitcoin future': 0.1,\n 'bitcoin trading': 0.07,\n 'future': 0.1,\n 'business': 0.01}}\n self._time = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n # 电子货币价格信息\n self.stats = get_stats()\n # 推荐的新闻\n self.four_news = self.news_filter(self.keywords)\n self.create_html()\n self.sendEmail()\n\n def create_html(self):\n filename = self._time\n fp = open(r\"D:\\bitcoin_crawier\\email_response\\mail\\templates\\mail\\{}.html\".format(filename), 'w')\n html_source = html_generator([self.stats] + self.four_news, self._time)\n fp.write(html_source)\n 
fp.close()\n\n    def sendEmail(self):\n        config = configparser.ConfigParser()\n        config.read_file(open('mail_init.ini'))\n\n        # Read the sender account from the config file\n        sender = config.get('sender', 'username')\n        password = config.get('sender', 'password')\n        # Initialise the database engine\n        engine = mysql_init()\n        # Open a database connection\n        conn = engine.connect()\n        # Fetch the list of recipient addresses from the database\n        sql_query = 'select email from mail_address'\n        receiver = conn.execute(sql_query).fetchall()\n        addr_list = [addr[0] for addr in receiver]\n        # Build and send the email\n        msg = MIMEText('<a href=\"{}\">链接</a>'.format('http://121.40.87.226:9090/mail/?'+self._time), 'html', 'utf-8')\n        msg['From'] = formataddr([\"user_name\", sender])\n        msg['Subject'] = '新闻推荐'\n        smtp = smtplib.SMTP('smtp.sina.com', 25)\n        smtp.set_debuglevel(1)\n        smtp.starttls()\n        # Log in and send\n        smtp.login(sender, password)\n        smtp.sendmail(sender, addr_list, msg.as_string())\n        print('Mail sent successfully')\n        smtp.quit()\n\n    def news_filter(self, keywords):\n\n        # Chinese and English news are filtered separately because they use different keyword sets\n        chinese_news = get_86_news() + get_36_news()\n        english_news = get_CME_news()\n        # english_news = []\n\n        print('Computing keyword match scores for the news......')\n        # Match scores for the Chinese news\n        for cn_news in chinese_news:\n            cn_news['score'] = 0\n            text = cn_news['title'] + cn_news['abstract']\n            for keyword, weight in keywords['cn'].items():\n                count = 0\n                for i in range(len(text) - len(keyword)):\n                    if re.search(keyword, text[i: i + len(keyword)]\n                                 ) is not None:\n                        count += 1\n                cn_news['score'] += count * weight\n\n        # Match scores for the English news\n        for en_news in english_news:\n            en_news['score'] = 0\n            text = en_news['title'] + en_news['abstract']\n            for keyword, weight in keywords['en'].items():\n                count = 0\n                for i in range(len(text) - len(keyword)):\n                    if re.search(keyword, text[i: i + len(keyword)]\n                                 ) is not None:\n                        count += 1\n                en_news['score'] += count * weight\n\n        # Concatenate all news and sort by match score\n        all_news = chinese_news + english_news\n        all_news = sorted(all_news, key=lambda x: x['score'], reverse=True)\n        print('Keyword match scoring finished.')\n        return all_news[:4]\n\n\nif __name__ == '__main__':\n    newsObj = NewsRecommendation()\n    for i in newsObj.four_news:\n        for k, v in i.items():\n            print(k, ':', v)\n        print('\\n')\n", "sub_path": "bitcoin_scrapy/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "bitfinex.get_stats", "line_number": 41, "usage_type": "call"}, {"api_name": "html_generator.html_generator", "line_number": 50, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 55, "usage_type": "call"}, {"api_name": "db_init.mysql_init", "line_number": 62, "usage_type": "call"}, {"api_name": "email.mime.text.MIMEText", "line_number": 70, "usage_type": "call"}, {"api_name": "email.utils.formataddr", "line_number": 71, "usage_type": "call"}, {"api_name": "smtplib.SMTP", "line_number": 73, "usage_type": "call"}, {"api_name": "bitcoin86.get_86_news", "line_number": 85, "usage_type": "call"}, {"api_name": "thirty_six_kr.get_36_news", "line_number": 85, "usage_type": "call"}, {"api_name": "CME.get_CME_news", "line_number": 86, "usage_type": "call"}, {"api_name": "re.search", "line_number": 97, "usage_type": "call"}, {"api_name": "re.search", "line_number": 109, "usage_type": "call"}]} +{"seq_id": "188336936", "text": "from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom blog.models import Blog,User\nfrom django.urls import reverse\nfrom 
.form import UserForm\n\n# Create your views here.\ndef index(request):\n    blogs = Blog.objects.all()\n    return render(request,'back/index.html',{'blogs':blogs})\n\ndef add(request):\n    if request.method == 'GET':\n        return render(request,'back/add.html',)\n    if request.method == 'POST':\n        title = request.POST.get('title')\n        content = request.POST.get('content')\n        Blog.objects.create(title=title,content=content)\n        # return redirect('/back/') # on success, redirect to the home page\n        return redirect(reverse('back:index')) # use reverse() to generate the route URL\n\n\ndef delete(request,bid):\n    blog = Blog.objects.get(pk=bid)\n    if blog:\n        blog.delete()\n    return redirect(reverse('back:index')) # use reverse() to generate the route URL\n\n\ndef edit(request,bid):\n    if request.method == 'GET':\n        blog = Blog.objects.get(pk=bid)\n        return render(request,'back/edit.html',{'blog':blog})\n    if request.method == 'POST':\n        title = request.POST.get('title')\n        content = request.POST.get('content')\n        blog = Blog.objects.get(pk=bid)\n        blog.title = title\n        blog.content = content\n        blog.save()\n        # return redirect('/back/') # on success, redirect to the home page\n        return redirect(reverse('back:index')) # use reverse() to generate the route URL\n\ndef detail(request,bid):\n    blog = Blog.objects.get(pk=bid)\n    # title = blog.title\n    # content = blog.content\n    return render(request,'back/detail.html',{'blog':blog})\n\ndef reg(request):\n    if request.method == 'GET':\n        form = UserForm()\n        return render(request,'back/auth/reg.html',{'form':form})\n    if request.method == 'POST':\n        user = UserForm(request.POST)\n        if user.is_valid():\n            # username = user.cleaned_data.get('username')\n            # password = user.cleaned_data.get('password')\n            # User.objects.create(username=username,password=password)\n            User.objects.create(** user.cleaned_data) # unpack cleaned_data (a dict) as keyword arguments\n            return redirect('back:login')\n        return render(request,'back/auth/reg.html',{'form':user})\n\ndef login(request):\n    if request.method == 'GET':\n        form = UserForm()\n        return render(request,'back/auth/login.html',{'form':form})\n    if request.method == 'POST':\n        user = UserForm(request.POST)\n        if user.is_valid():\n            users = User.objects.filter(**user.cleaned_data)\n            if users:\n                request.session['user'] = users[0].username\n                return redirect(reverse('back:index'))\n        return render(request,'back/auth/login.html',{'form':user})\n\ndef logout(request):\n    request.session.flush()\n    return redirect(reverse('back:login'))", "sub_path": "back/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2890, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "blog.models.Blog.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 9, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 10, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 14, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects.create", "line_number": 18, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 20, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 24, "usage_type": "name"}, {"api_name": "blog.models.Blog.objects.get", "line_number": 24, "usage_type": "call"}, {"api_name": 
"blog.models.Blog.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 24, "usage_type": "name"}, {"api_name": "blog.models", "line_number": 25, "usage_type": "name"}, {"api_name": "blog.models.delete", "line_number": 26, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 27, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 27, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 32, "usage_type": "name"}, {"api_name": "blog.models.Blog.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 33, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 33, "usage_type": "name"}, {"api_name": "blog.models", "line_number": 37, "usage_type": "name"}, {"api_name": "blog.models.Blog.objects.get", "line_number": 37, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects", "line_number": 37, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 37, "usage_type": "name"}, {"api_name": "blog.models.title", "line_number": 38, "usage_type": "attribute"}, {"api_name": "blog.models", "line_number": 38, "usage_type": "name"}, {"api_name": "blog.models.content", "line_number": 39, "usage_type": "attribute"}, {"api_name": "blog.models", "line_number": 39, "usage_type": "name"}, {"api_name": "blog.models.save", "line_number": 40, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 42, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 42, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 45, "usage_type": "name"}, {"api_name": "blog.models.Blog.objects.get", "line_number": 45, "usage_type": "call"}, {"api_name": "blog.models.Blog.objects", "line_number": 45, "usage_type": "attribute"}, {"api_name": "blog.models.Blog", "line_number": 45, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 48, "usage_type": "call"}, {"api_name": "blog.models", "line_number": 48, "usage_type": "name"}, {"api_name": "form.UserForm", "line_number": 52, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 53, "usage_type": "call"}, {"api_name": "form.UserForm", "line_number": 55, "usage_type": "call"}, {"api_name": "blog.models.User.objects.create", "line_number": 60, "usage_type": "call"}, {"api_name": "blog.models.User.objects", "line_number": 60, "usage_type": "attribute"}, {"api_name": "blog.models.User", "line_number": 60, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 62, "usage_type": "call"}, {"api_name": "form.UserForm", "line_number": 66, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 67, "usage_type": "call"}, {"api_name": "form.UserForm", "line_number": 69, "usage_type": "call"}, {"api_name": "blog.models.User.objects.filter", "line_number": 71, "usage_type": "call"}, {"api_name": "blog.models.User.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "blog.models.User", "line_number": 71, "usage_type": "name"}, {"api_name": 
"django.shortcuts.redirect", "line_number": 74, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 74, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 75, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 79, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 79, "usage_type": "call"}]} +{"seq_id": "481833702", "text": "# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport stat\nimport os.path\nimport platform\nimport time\nimport random\nimport tempfile\n\"\"\"\nimport win32api\n\"\"\"\nimport sys\n\ndef temp_file(suffix='.txt'):\n\tx = tempfile.mkstemp(suffix=suffix)\n\tos.close(x(0))\n\treturn x[1]\n\ndef filepoolpath(root):\n\t\tpaths=[191,193,197,199,97]\n\t\tv = random.randint(0,9999999)\n\t\tpath = os.path.abspath(os.path.join(root,\n\t\t\t\t\tstr(v % paths[0]),\n\t\t\t\t\tstr(v % paths[1]),\n\t\t\t\t\tstr(v % paths[2]),\n\t\t\t\t\tstr(v % paths[3]),\n\t\t\t\t\tstr(v % paths[4])))\n\t\treturn path\n\ndef startsWith(text,s):\n\treturn text[:len(s)] == s\n\ndef endsWith(text,s):\n\treturn text[-len(s):] == s\n\ndef ProgramPath():\n\tfilename = sys.argv[0]\n\tif getattr(sys,'frozen',False):\n\t\tfilename = sys.executable\n\tp = os.path.dirname(os.path.abspath(filename))\n\treturn p\n\ndef timestamp2datatiemStr(ts):\n\tt = time.localtime(ts)\n\treturn '%04d-%02d-%-02d %02d:%02d:%02d' % (t.tm_year,t.tm_mon,t.tm_mday,t.tm_hour,t.tm_min,t.tm_sec)\n\t\n\"\"\"\ndef findAllDrives():\n Drives=[]\n # print \"Searching for drives...\"\n drives=win32api.GetLogicalDriveStrings().split(\":\")\n for i in drives:\n # print \"i=\",i,\":\"\n dr=i[-1].lower()\n if dr.isalpha():\n dr+=\":\\\\\"\n inf=None\n try:\n inf=win32api.GetVolumeInformation(dr)\n except:\n pass # Removable drive, not ready\n\t\t # You'll still get the drive letter, but inf will be None\n Drives.append([dr,inf])\n return Drives\n\"\"\"\n\n## list all folder name under folder named by path\n#\ndef listFolder(path, rescursive=False) :\n\tfor name in os.listdir(path) :\n\t\tfull_name = os.path.join(path,name)\n\t\tif os.path.isdir(full_name):\n\t\t\tfor f in listFolder(full_name, rescursive=rescursive):\n\t\t\t\tyield f\n\t\t\tyield full_name\n\ndef listFile(folder,suffixs=[],rescursive=False):\n subffixs = [ i.lower() for i in suffixs ]\n for f in os.listdir(folder):\n p = os.path.join(folder,f)\n if rescursive and os.path.isdir(p):\n for p1 in listFile(p,suffixs=suffixs,rescursive=True):\n\t yield p1\n if os.path.isfile(p):\n e = p.lower()\n if suffixs == [] :\n yield p\n for s in subffixs:\n if e.endswith(s):\n yield p\n\ndef folderInfo(root,uri=''):\n\trelpath = uri\n\tif uri[1]=='/':\n\t\trelpath = uri[1:]\n\t\n\tpath = os.path.join(root,*relpath.split('/'))\n\tret = []\n\tfor name in os.listdir(path):\n\t\tfull_name = os.path.join(path,name)\n\t\ts = os.stat(full_name)\n\t\tif stat.S_ISDIR(s.st_mode):\n\t\t\tret.append( {\n\t\t\t\t'id':relpath + '/' + name,\n\t\t\t\t'name':name,\n\t\t\t\t'path':relpath,\n\t\t\t\t'type':'dir',\n\t\t\t\t'size':s.st_size,\n\t\t\t\t'mtime':timestamp2datatiemStr(s.st_mtime),\n\t\t\t})\n\t\tif stat.S_ISREG(s.st_mode):\n\t\t\tret.append( {\n\t\t\t\t'id':relpath + '/' + name,\n\t\t\t\t'name':name,\n\t\t\t\t'path':relpath,\n\t\t\t\t'type':'file',\n\t\t\t\t'size':s.st_size,\n\t\t\t\t'mtime':timestamp2datatiemStr(s.st_mtime),\n\t\t\t})\n\treturn ret\n\t\t\n\t\t\ndef rmdir_recursive(dir):\n\t\"\"\"Remove a directory, and all its contents if it is not already empty.\"\"\"\n\tfor name in 
os.listdir(dir):\n\t\tfull_name = os.path.join(dir, name)\n\t\t# on Windows, if we don't have write permission we can't remove\n\t\t# the file/directory either, so turn that on\n\t\tif not os.access(full_name, os.W_OK):\n\t\t\tos.chmod(full_name, 0o600)\n\t\tif os.path.isdir(full_name):\n\t\t\trmdir_recursive(full_name)\n\t\telse:\n\t\t\tos.remove(full_name)\n\tos.rmdir(dir)\n\ndef _mkdir(newdir) :\n \"\"\"works the way a good mkdir should :)\n - already exists, silently complete\n - regular file in the way, raise an exception\n - parent directory(ies) does not exist, make them as well\n \"\"\"\n if os.path.isdir(newdir):\n pass\n elif os.path.isfile(newdir):\n raise OSError(\"a file with the same name as the desired \" \\\n \"dir, '%s', already exists.\" % newdir)\n else:\n head, tail = os.path.split(newdir)\n if head and not os.path.isdir(head):\n _mkdir(head)\n #print \"_mkdir %s\" % repr(newdir)\n if tail:\n os.mkdir(newdir)\n\ndef _copyfile(fp,dir) :\n\tfs = open(fp,'rb')\n\tname = os.path.basename(fp)\n\tnewfp = os.path.join(dir,getFileName(name,dir))\n\tf = open(newfp,'wb')\n\twhile True :\n\t\tdata = fs.read(65536)\n\t\tif not data :\n\t\t\tbreak\n\t\tf.write(data)\n\tfs.close()\n\tf.close()\n\treturn True\n\ndef _copydir(fp,dir,topdistinct) :\n\tname = os.path.basename(fp)\n\tnewname = getFileName(name,dir)\n\tdebug(newname)\n\tnewfp = os.path.join(dir,newname)\n\t_mkdir(newfp)\n\tif fp==topdistinct :\n\t\treturn True\n\n\tflist = os.listdir(fp)\n\tfor name in flist :\n\t\tfull_name = os.path.join(fp,name)\n\t\tif os.path.isdir(full_name) :\n\t\t\tp = os.path.join(dir,name)\n\t\t\t_copydir(full_name,newfp,topdistinct)\n\t\telse :\n\t\t\tif os.path.isfile(full_name) :\n\t\t\t\t_copyfile(full_name,newfp)\n\treturn True\n\nmkdir=_mkdir\ncopyfile = _copyfile\ncopydir = _copydir\nrmdir = rmdir_recursive\n", "sub_path": "appPublic/folderUtils.py", "file_name": "folderUtils.py", "file_ext": "py", "file_size_in_byte": 4874, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tempfile.mkstemp", "line_number": 17, "usage_type": "call"}, {"api_name": "os.close", "line_number": 18, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.executable", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 42, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 46, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 
83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 101, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 103, "usage_type": "call"}, {"api_name": "stat.S_ISDIR", "line_number": 104, "usage_type": "call"}, {"api_name": "stat.S_ISREG", "line_number": 113, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 131, "usage_type": "call"}, {"api_name": "os.W_OK", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 136, "usage_type": "call"}, {"api_name": "os.rmdir", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, {"api_name": "os.path.split", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path", "line_number": 151, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 160, "usage_type": "call"}, {"api_name": "os.path", "line_number": 160, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path", "line_number": 161, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 173, "usage_type": "call"}, {"api_name": "os.path", "line_number": 173, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 181, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 184, "usage_type": "call"}, {"api_name": "os.path", "line_number": 184, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 185, "usage_type": "call"}, {"api_name": "os.path", "line_number": 185, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 188, "usage_type": "call"}, {"api_name": "os.path", "line_number": 188, "usage_type": "attribute"}]} +{"seq_id": "270442363", "text": "# call Slurm to execute tasks and return result or error\nimport os \nimport json\nimport configparser\nfrom subprocess 
import Popen, PIPE\n\nclass Slurm(object):\n    \"\"\"Manage slurm conf and execute tasks\"\"\"\n    def __init__(self, slurm_exec, slurm_argc=None):\n        super(Slurm, self).__init__()\n        self.slurm_exec = slurm_exec\n        # accept either a JSON-encoded argument string or an already-built list\n        self.slurm_argc = json.loads(slurm_argc) if isinstance(slurm_argc, str) else slurm_argc\n        self.cmd_path = {}\n        self._parse_config()\n\n    def run_task(self):\n        \" Run task using slurm \"\n        slurm_cmd = [self.cmd_path[self.slurm_exec]]\n        if self.slurm_argc:\n            slurm_cmd.extend(self.slurm_argc)\n        p = Popen(slurm_cmd, stdout=PIPE, stderr=PIPE)\n        (result, error) = p.communicate()\n        return (result, error)\n\n    def test_run(self):\n        \" Launch task using slurm without collecting its output \"\n        slurm_cmd = [self.cmd_path[self.slurm_exec]]\n        if self.slurm_argc:\n            slurm_cmd.extend(self.slurm_argc)\n        p = Popen(slurm_cmd, stdout=PIPE, stderr=PIPE)\n\n    def _parse_config(self):\n        \" Parse the config file to get the paths of the slurm cmds \"\n        conf_section = 'PATH'\n        conf_slurm_path = 'slurm_path'\n        conf_srun = 'srun'\n        conf_sbatch = 'sbatch'\n        conf_salloc = 'salloc'\n        conf_scancel = 'scancel'\n        conf_scontrol = 'scontrol'\n        conf_openmpi_path = 'openmpi_path'\n        conf_mpi_path = 'mpi_path'\n\n        conf_path = os.getenv('TENO_SLURM_CONF')\n        if not conf_path:\n            conf_path = \"./teno-slurm.conf\"\n\n        config = configparser.ConfigParser()\n        config.read(conf_path)\n        if conf_section in config.sections():\n            section = config[conf_section]\n            self.cmd_path[conf_slurm_path] = section.get(conf_slurm_path)\n            self.cmd_path[conf_srun] = section.get(conf_srun)\n            self.cmd_path[conf_sbatch] = section.get(conf_sbatch)\n            self.cmd_path[conf_salloc] = section.get(conf_salloc)\n            self.cmd_path[conf_scancel] = section.get(conf_scancel)\n            self.cmd_path[conf_scontrol] = section.get(conf_scontrol)\n            self.cmd_path[conf_openmpi_path] = section.get(conf_openmpi_path)\n            self.cmd_path[conf_mpi_path] = section.get(conf_mpi_path)\n        self._check_conf()\n\n    def _check_conf(self):\n        \" Check that slurm cmds are configured; if not, assign default values \"\n        conf_slurm_path = 'slurm_path'\n        conf_srun = 'srun'\n        conf_sbatch = 'sbatch'\n        conf_salloc = 'salloc'\n        conf_scancel = 'scancel'\n        conf_scontrol = 'scontrol'\n        default_slurm_path = \"/usr\"\n        default_srun = \"/usr/bin/srun\"\n        default_sbatch = \"/usr/bin/sbatch\"\n        default_salloc = \"/usr/bin/salloc\"\n        default_scancel = \"/usr/bin/scancel\"\n        default_scontrol = \"/usr/bin/scontrol\"\n        if not self.cmd_path.get(conf_slurm_path):\n            self.cmd_path[conf_slurm_path] = default_slurm_path\n        if not self.cmd_path.get(conf_srun):\n            self.cmd_path[conf_srun] = default_srun\n        if not self.cmd_path.get(conf_sbatch):\n            self.cmd_path[conf_sbatch] = default_sbatch\n        if not self.cmd_path.get(conf_salloc):\n            self.cmd_path[conf_salloc] = default_salloc\n        if not self.cmd_path.get(conf_scancel):\n            self.cmd_path[conf_scancel] = default_scancel\n        if not self.cmd_path.get(conf_scontrol):\n            self.cmd_path[conf_scontrol] = default_scontrol\n\n\ndef run(task_exec, slurm_argc):\n    slurm_task = Slurm(task_exec, slurm_argc)\n    (result, error) = slurm_task.run_task()\n    return_code = 0 if result else 1\n    return (return_code, result.decode('UTF-8'), error.decode('UTF-8'))\n\nif __name__ == '__main__':\n    slurm_task = Slurm('srun', ['hostname'])\n    result, error = slurm_task.run_task()\n    return_code = 0 if result else 1\n    print(return_code, result.decode('UTF-8'), error.decode('UTF-8'))", "sub_path": "slurm/slurm.py", "file_name": "slurm.py", "file_ext": "py", "file_size_in_byte": 3873, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.loads", "line_number": 12, 
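The Slurm wrapper above reduces to one subprocess pattern: build an argv list, run it, and capture both streams. A self-contained sketch of that pattern (the srun path and arguments are placeholders, not taken from any real config):

from subprocess import Popen, PIPE

cmd = ['/usr/bin/srun', 'hostname']   # placeholder binary and args
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
result, error = p.communicate()
print(p.returncode, result.decode('UTF-8'), error.decode('UTF-8'))

Checking p.returncode is more robust than inferring success from non-empty stdout, since a job can succeed while printing nothing.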
"usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 21, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 21, "usage_type": "name"}, {"api_name": "subprocess.Popen", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 30, "usage_type": "name"}, {"api_name": "os.getenv", "line_number": 44, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "210470102", "text": "import mock\n\nfrom django.test import TestCase\n\nfrom oscar.apps.checkout.mixins import CheckoutSessionMixin, OrderPlacementMixin\nfrom oscar.apps.checkout.exceptions import FailedPreCondition\nfrom oscar.test import factories\nfrom oscar.test.utils import RequestFactory\n\n\nclass TestOrderPlacementMixin(TestCase):\n\n def test_returns_none_when_no_shipping_address_passed_to_creation_method(self):\n address = OrderPlacementMixin().create_shipping_address(\n user=mock.Mock(), shipping_address=None)\n self.assertEqual(address, None)\n\n\nclass TestCheckoutSessionMixin(TestCase):\n\n def setUp(self):\n self.request = RequestFactory().get('/')\n self.product = factories.create_product(num_in_stock=10)\n self.stock_record = self.product.stockrecords.first()\n\n def add_product_to_basket(self, product, quantity=1):\n self.request.basket.add_product(product, quantity=quantity)\n self.assertEquals(len(self.request.basket.all_lines()), 1)\n self.assertEquals(self.request.basket.all_lines()[0].product, product)\n\n def test_check_basket_is_valid_no_stock_available(self):\n self.add_product_to_basket(self.product)\n CheckoutSessionMixin().check_basket_is_valid(self.request)\n self.stock_record.allocate(10)\n self.stock_record.save()\n with self.assertRaises(FailedPreCondition):\n CheckoutSessionMixin().check_basket_is_valid(self.request)\n\n def test_check_basket_is_valid_stock_exceeded(self):\n self.add_product_to_basket(self.product)\n CheckoutSessionMixin().check_basket_is_valid(self.request)\n self.request.basket.add_product(self.product, quantity=11)\n with self.assertRaises(FailedPreCondition):\n CheckoutSessionMixin().check_basket_is_valid(self.request)\n", "sub_path": "tests/integration/checkout/test_mixins.py", "file_name": "test_mixins.py", "file_ext": "py", "file_size_in_byte": 1805, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.test.TestCase", "line_number": 11, "usage_type": "name"}, {"api_name": "oscar.apps.checkout.mixins.OrderPlacementMixin", "line_number": 14, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 15, "usage_type": "call"}, {"api_name": "django.test.TestCase", "line_number": 19, "usage_type": "name"}, {"api_name": "oscar.test.utils.RequestFactory", "line_number": 22, "usage_type": "call"}, {"api_name": "oscar.test.factories.create_product", "line_number": 23, "usage_type": "call"}, {"api_name": "oscar.test.factories", "line_number": 23, "usage_type": "name"}, {"api_name": "oscar.apps.checkout.mixins.CheckoutSessionMixin", "line_number": 33, "usage_type": "call"}, {"api_name": "oscar.apps.checkout.exceptions.FailedPreCondition", "line_number": 36, "usage_type": "argument"}, {"api_name": "oscar.apps.checkout.mixins.CheckoutSessionMixin", "line_number": 37, "usage_type": "call"}, {"api_name": "oscar.apps.checkout.mixins.CheckoutSessionMixin", "line_number": 41, "usage_type": "call"}, {"api_name": "oscar.apps.checkout.exceptions.FailedPreCondition", "line_number": 43, 
"usage_type": "argument"}, {"api_name": "oscar.apps.checkout.mixins.CheckoutSessionMixin", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "137732888", "text": "# yellowbrick.colors\n# Colors and color helpers brought in from a different library.\n#\n# Author: Benjamin Bengfort \n# Created: Fri Jun 24 17:02:53 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: colors.py [c6aff34] benjamin@bengfort.com $\n\n\"\"\"\nColors and color helpers brought in from an alternate library.\nSee https://bl.ocks.org/mbostock/5577023\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport random\nimport warnings\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.cm as cm\n\nfrom copy import copy\nfrom six import string_types\nfrom yellowbrick.exceptions import YellowbrickValueError\n\n# Check to see if matplotlib is at least sorta up to date\nfrom distutils.version import LooseVersion\nmpl_ge_150 = LooseVersion(mpl.__version__) >= \"1.5.0\"\n\n\n##########################################################################\n## Color Utilities\n##########################################################################\n\ndef get_color_cycle():\n \"\"\"\n Returns the current color cycle from matplotlib.\n \"\"\"\n if mpl_ge_150:\n cyl = mpl.rcParams['axes.prop_cycle']\n # matplotlib 1.5 verifies that axes.prop_cycle *is* a cycler\n # but no garuantee that there's a `color` key.\n # so users could have a custom rcParmas w/ no color...\n try:\n return [x['color'] for x in cyl]\n except KeyError:\n pass # just return axes.color style below\n return mpl.rcParams['axes.color_cycle']\n\n\ndef resolve_colors(num_colors=None, colormap=None, color=None):\n \"\"\"\n Resolves the colormap or the color list with the number of colors.\n See: https://github.com/pydata/pandas/blob/master/pandas/tools/plotting.py#L163\n\n Parameters\n ----------\n num_colors : int or None\n the number of colors in the cycle or colormap\n\n colormap : str or None\n the colormap used to create the sequence of colors\n\n color : list or None\n the list of colors to specifically use with the plot\n\n \"\"\"\n\n # Work with the colormap\n if color is None and colormap is None:\n if isinstance(colormap, str):\n cmap = colormap\n colormap = cm.get_cmap(colormap)\n\n if colormap is None:\n raise YellowbrickValueError(\n \"Colormap {0} is not a valid matploblib cmap\".format(cmap)\n )\n\n colors = list(map(colormap, np.linspace(0, 1, num=num_colors)))\n\n # Work with the color list\n elif color is not None:\n\n if colormap is not None:\n warnings.warn(\n \"'color' and 'colormap' cannot be used simultaneously! Using 'color'.\"\n )\n\n colors = list(color) # Ensure colors is a list\n\n # Get the default colors\n else:\n colors = get_color_cycle()\n\n if len(colors) != num_colors:\n multiple = num_colors // len(colors) - 1\n mod = num_colors % len(colors)\n colors += multiple * colors\n colors += colors[:mod]\n\n return colors\n\n\nclass ColorMap(object):\n \"\"\"\n A helper for mapping categorical values to colors on demand.\n \"\"\"\n\n def __init__(self, colors='flatui', shuffle=False):\n \"\"\"\n Specify either a list of colors or one of the color names. 
If shuffle\n is True then the colors will be shuffled randomly.\n \"\"\"\n self.mapping = {}\n self.colors = colors\n\n if shuffle:\n random.shuffle(self._colors)\n\n @property\n def colors(self):\n return self._colors\n\n @colors.setter\n def colors(self, value):\n \"\"\"\n Converts color strings into a color listing.\n \"\"\"\n if isinstance(value, string_types):\n if value not in PALETTES:\n raise YellowbrickValueError(\n \"'{}' is not a registered color palette\".format(value)\n )\n self._colors = copy(PALETTES[value])\n elif isinstance(value, list):\n self._colors = value\n else:\n self._colors = list(value)\n\n def __call__(self, category):\n if category not in self.mapping:\n if self.colors:\n self.mapping[category] = self.colors.pop()\n else:\n raise YellowbrickValueError(\n \"Not enough colors for this many categories!\"\n )\n\n return self.mapping[category]\n", "sub_path": "yellowbrick/style/colors.py", "file_name": "colors.py", "file_ext": "py", "file_size_in_byte": 4566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "distutils.version.LooseVersion", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.__version__", "line_number": 33, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 45, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 53, "usage_type": "attribute"}, {"api_name": "matplotlib.cm.get_cmap", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.cm", "line_number": 78, "usage_type": "name"}, {"api_name": "yellowbrick.exceptions.YellowbrickValueError", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 85, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 91, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 124, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 135, "usage_type": "argument"}, {"api_name": "yellowbrick.exceptions.YellowbrickValueError", "line_number": 137, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 140, "usage_type": "call"}, {"api_name": "yellowbrick.exceptions.YellowbrickValueError", "line_number": 151, "usage_type": "call"}]} +{"seq_id": "391182671", "text": "import pyaudio\nimport audioop\nimport numpy as np\nimport inspect\nimport operator\nimport pyglet\n\nFORMAT = pyaudio.paFloat32\nCHANNELS = 1\nRATE = 44100\nCHUNK = 256\nBATCH_SECONDS = 0.2\n\nBATCH_SIZE = int(RATE/CHUNK * BATCH_SECONDS)\n\nclass MyWindow(pyglet.window.Window):\n def __init__(self, screen):\n super().__init__(screen=screen, fullscreen=True)\n self.label = pyglet.text.Label('ZU LAUT!',\n font_name='impact',\n font_size=36,\n x=self.width//2, y=self.height//2,\n anchor_x='center', anchor_y='center')\n self.image = pyglet.image.SolidColorImagePattern((255, 0, 0, 255))\\\n .create_image(self.width, self.height)\n\n def on_draw(self):\n self.clear()\n self.image.blit(0, 0)\n self.label.draw()\n \n\nclass MyEventLoop(pyglet.app.EventLoop):\n def __init__(self):\n super().__init__()\n self.audio = pyaudio.PyAudio()\n self.stream = self.audio.open(format=FORMAT, channels=CHANNELS,\n rate=RATE, input=True,\n frames_per_buffer=CHUNK)\n self.batch = np.zeros((BATCH_SIZE, CHUNK))\n self.i = 0\n\n def idle(self):\n pyglet.clock.tick(poll=True)\n\n data = self.stream.read(CHUNK, exception_on_overflow=False)\n numpydata = np.fromstring(data, dtype=np.float32)\n self.batch[self.i] = numpydata\n self.i = (self.i+1) % 
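resolve_colors in the yellowbrick record above boils down to sampling a matplotlib colormap at evenly spaced points. The core of it standalone ('viridis' is an arbitrary example; on matplotlib 3.9+ use matplotlib.colormaps['viridis'] instead of cm.get_cmap):

import numpy as np
import matplotlib.cm as cm

colormap = cm.get_cmap('viridis')
colors = [colormap(x) for x in np.linspace(0, 1, num=5)]
print(colors)   # five RGBA tuples spanning the map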
BATCH_SIZE\n if self.i == 0:\n for window in pyglet.app.windows:\n window.set_visible(self.batch.max() > 0.6)\n\n for window in pyglet.app.windows:\n window.switch_to()\n window.dispatch_event('on_draw')\n window.flip()\n\n \n return pyglet.clock.get_sleep_time(sleep_idle=False)\n\n\ndisplay = pyglet.window.get_platform().get_default_display()\nscreens = display.get_screens()\n\nwindows = [MyWindow(screens[1]), MyWindow(screens[2])]\n\nMyEventLoop().run()\n", "sub_path": "alert.py", "file_name": "alert.py", "file_ext": "py", "file_size_in_byte": 2065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pyaudio.paFloat32", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pyglet.window", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pyglet.text.Label", "line_number": 19, "usage_type": "call"}, {"api_name": "pyglet.text", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pyglet.image.SolidColorImagePattern", "line_number": 24, "usage_type": "call"}, {"api_name": "pyglet.image", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pyglet.app", "line_number": 33, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 40, "usage_type": "call"}, {"api_name": "pyglet.clock.tick", "line_number": 44, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.fromstring", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 47, "usage_type": "attribute"}, {"api_name": "pyglet.app", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pyglet.app", "line_number": 54, "usage_type": "attribute"}, {"api_name": "pyglet.clock.get_sleep_time", "line_number": 60, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pyglet.window.get_platform", "line_number": 63, "usage_type": "call"}, {"api_name": "pyglet.window", "line_number": 63, "usage_type": "attribute"}]} +{"seq_id": "638047236", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('main', '0004_auto_20160306_1429'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='repeatavail',\n options={'ordering': ('start',)},\n ),\n migrations.RemoveField(\n model_name='repeatevent',\n name='repeatAvailId',\n ),\n migrations.AlterField(\n model_name='repeatavail',\n name='end',\n field=models.IntegerField(),\n ),\n ]\n", "sub_path": "main/migrations/old/0005_auto_20160306_1449.py", "file_name": "0005_auto_20160306_1449.py", "file_ext": "py", "file_size_in_byte": 633, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterModelOptions", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 22, 
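The pyglet alert recorded above reads CHUNK-sized float32 frames into a rolling batch and toggles the warning window on a peak threshold. The measurement step in isolation, with synthetic data standing in for the microphone (np.frombuffer is the current replacement for the deprecated np.fromstring the record uses):

import numpy as np

RATE, CHUNK, BATCH_SECONDS = 44100, 256, 0.2
BATCH_SIZE = int(RATE / CHUNK * BATCH_SECONDS)

# synthetic stand-in for BATCH_SIZE reads of stream.read(CHUNK)
batch = np.random.uniform(-1.0, 1.0, size=(BATCH_SIZE, CHUNK)).astype(np.float32)

too_loud = batch.max() > 0.6   # same peak test that drives set_visible
print(too_loud)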
"usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "313674497", "text": "# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility wrappers around apitools generator.\"\"\"\n\nimport logging\nimport os\n\nfrom apitools.gen import gen_client\nfrom tools.regen_apis import api_def\nfrom tools.regen_apis import resource_generator\nfrom mako import runtime\nfrom mako import template\n\n\n_INIT_FILE_CONTENT = \"\"\"\\\n# Copyright 2016 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n\n\nclass NoDefaultApiError(Exception):\n \"\"\"Multiple apis versions are specified but no default is set.\"\"\"\n\n\nclass WrongDiscoveryDoc(Exception):\n \"\"\"Unexpected discovery doc.\"\"\"\n\n\ndef GenerateApi(base_dir, root_dir, api_name, api_version, api_config):\n \"\"\"Invokes apitools generator for given api.\"\"\"\n discovery_doc = api_config['discovery_doc']\n\n args = [gen_client.__file__]\n\n unelidable_request_methods = api_config.get('unelidable_request_methods')\n if unelidable_request_methods:\n args.append('--unelidable_request_methods={0}'.format(\n ','.join(api_config['unelidable_request_methods'])))\n\n args.extend([\n '--init-file=empty',\n '--nogenerate_cli',\n '--infile={0}'.format(os.path.join(base_dir, root_dir, discovery_doc)),\n '--outdir={0}'.format(os.path.join(base_dir, root_dir, api_name,\n api_version)),\n '--overwrite',\n '--apitools_version=CloudSDK',\n '--root_package',\n '{0}.{1}.{2}'.format(\n root_dir.replace('/', '.'), api_name, api_version),\n 'client',\n ])\n logging.debug('Apitools gen %s', args)\n gen_client.main(args)\n\n package_dir = base_dir\n for subdir in [root_dir, api_name, api_version]:\n package_dir = os.path.join(package_dir, subdir)\n init_file = os.path.join(package_dir, '__init__.py')\n if not os.path.isfile(init_file):\n logging.warn('%s does not have __init__.py file, generating ...',\n package_dir)\n with open(init_file, 'w') as f:\n f.write(_INIT_FILE_CONTENT)\n\n\ndef _CamelCase(snake_case):\n return ''.join(x.capitalize() for x in snake_case.split('_'))\n\n\ndef _MakeApiMap(root_package, api_config):\n \"\"\"Converts a map of api_config into ApiDef.\n\n Args:\n root_package: str, root path of where generate api will reside.\n 
api_config: {api_name->api_version->{discovery,default,version,...}},\n description of each api.\n Returns:\n {api_name->api_version->ApiDef()}.\n\n Raises:\n NoDefaultApiError: if for some api with multiple versions\n default was not specified.\n \"\"\"\n apis_map = {}\n apis_with_default = set()\n for api_name, api_version_config in api_config.iteritems():\n api_versions_map = apis_map.setdefault(api_name, {})\n has_default = False\n for api_version, api_config in api_version_config.iteritems():\n default = api_config.get('default', len(api_version_config) == 1)\n has_default = has_default or default\n version = api_config.get('version', api_version)\n client_classpath = '.'.join([\n '_'.join([api_name, version, 'client']),\n _CamelCase(api_name) + _CamelCase(version)])\n messages_modulepath = '_'.join([api_name, version, 'messages'])\n api_versions_map[api_version] = api_def.APIDef(\n '.'.join([root_package, api_name, api_version]),\n client_classpath, messages_modulepath, default)\n if has_default:\n apis_with_default.add(api_name)\n\n apis_without_default = set(apis_map.keys()).difference(apis_with_default)\n if apis_without_default:\n raise NoDefaultApiError('No default client versions found for [{0}]!'\n .format(', '.join(sorted(apis_without_default))))\n return apis_map\n\n\ndef GenerateApiMap(base_dir, root_dir, api_config):\n \"\"\"Create an apis_map.py file in the given root_dir with for given api_config.\n\n Args:\n base_dir: str, Path of directory for the project.\n root_dir: str, Path of the map file location within the project.\n api_config: regeneration config for all apis.\n \"\"\"\n\n api_def_filename, _ = os.path.splitext(api_def.__file__)\n with open(api_def_filename + '.py', 'rU') as api_def_file:\n api_def_source = api_def_file.read()\n\n tpl = template.Template(filename=os.path.join(os.path.dirname(__file__),\n 'template.tpl'))\n api_map_file = os.path.join(base_dir, root_dir, 'apis_map.py')\n logging.debug('Generating api map at %s', api_map_file)\n api_map = _MakeApiMap(root_dir.replace('/', '.'), api_config)\n logging.debug('Creating following api map %s', api_map)\n with open(api_map_file, 'wb') as apis_map_file:\n ctx = runtime.Context(apis_map_file,\n api_def_source=api_def_source,\n apis_map=api_map)\n tpl.render_context(ctx)\n\n\ndef GenerateResourceModule(base_dir, root_dir, api_name, api_version,\n discovery_doc_path, custom_resources):\n \"\"\"Create resource.py file for given api and its discovery doc.\n\n Args:\n base_dir: str, Path of directory for the project.\n root_dir: str, Path of the resource file location within the project.\n api_name: str, name of the api.\n api_version: str, the version for the api.\n discovery_doc_path: str, file path to discovery doc.\n custom_resources: dict, dictionary of custom resource collections.\n Raises:\n WrongDiscoveryDoc: if discovery doc api name/version does not match.\n \"\"\"\n\n discovery_doc = resource_generator.DiscoveryDoc.FromJson(\n os.path.join(base_dir, root_dir, discovery_doc_path))\n if discovery_doc.api_version != api_version:\n logging.warn('Discovery api version %s does not match %s, '\n 'this client will be accessible via new alias.',\n discovery_doc.api_version, api_version)\n if discovery_doc.api_name != api_name:\n raise WrongDiscoveryDoc('api name {0}, expected {1}'\n .format(discovery_doc.api_name, api_name))\n resource_collections = discovery_doc.GetResourceCollections(api_version)\n if custom_resources:\n # Check if this is redefining one of the existing collections.\n matched_resources = 
set([])\n for collection in resource_collections:\n if collection.name in custom_resources:\n matched_resources.add(collection.name)\n custom_path = custom_resources[collection.name]\n if isinstance(custom_path, dict):\n collection.flat_paths.update(custom_path)\n elif isinstance(custom_path, basestring):\n collection.flat_paths[\n resource_generator.DEFAULT_PATH_NAME] = custom_path\n # Remaining must be new custom resources.\n for collection_name in set(custom_resources.keys()) - matched_resources:\n collection_path = custom_resources[collection_name]\n collection_info = discovery_doc.MakeResourceCollection(\n collection_name, collection_path, api_version)\n resource_collections.append(collection_info)\n\n api_dir = os.path.join(base_dir, root_dir, api_name, api_version)\n if not os.path.exists(api_dir):\n os.makedirs(api_dir)\n resource_file_name = os.path.join(api_dir, 'resources.py')\n logging.debug('Generating resource module at %s', resource_file_name)\n\n if resource_collections:\n tpl = template.Template(filename=os.path.join(os.path.dirname(__file__),\n 'resources.tpl'))\n with open(resource_file_name, 'wb') as output_file:\n ctx = runtime.Context(output_file,\n collections=sorted(resource_collections),\n base_url=resource_collections[0].base_url)\n tpl.render_context(ctx)\n", "sub_path": "google-cloud-sdk/lib/tools/regen_apis/regen.py", "file_name": "regen.py", "file_ext": "py", "file_size_in_byte": 8658, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "apitools.gen.gen_client.__file__", "line_number": 56, "usage_type": "attribute"}, {"api_name": "apitools.gen.gen_client", "line_number": 56, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 76, "usage_type": "call"}, {"api_name": "apitools.gen.gen_client.main", "line_number": 77, "usage_type": "call"}, {"api_name": "apitools.gen.gen_client", "line_number": 77, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path", "line_number": 83, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 84, "usage_type": "call"}, {"api_name": "tools.regen_apis.api_def.APIDef", "line_number": 121, "usage_type": "call"}, {"api_name": "tools.regen_apis.api_def", "line_number": 121, "usage_type": "name"}, {"api_name": "os.path.splitext", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tools.regen_apis.api_def.__file__", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tools.regen_apis.api_def", "line_number": 143, "usage_type": "name"}, {"api_name": "mako.template.Template", "line_number": 147, "usage_type": "call"}, {"api_name": "mako.template", "line_number": 147, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path", "line_number": 147, "usage_type": "attribute"}, 
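GenerateApiMap and GenerateResourceModule in the record above both render Mako templates through an explicit runtime.Context rather than Template.render. The call pattern in miniature (the one-line template string is invented for illustration):

from io import StringIO
from mako import runtime, template

tpl = template.Template('APIS = ${repr(apis)}\n')   # invented template
buf = StringIO()
ctx = runtime.Context(buf, apis={'compute': 'v1'})
tpl.render_context(ctx)
print(buf.getvalue())   # APIS = {'compute': 'v1'}

Passing a Context lets the caller stream straight into an open file, which is exactly what the record does with apis_map_file.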
{"api_name": "os.path.dirname", "line_number": 147, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 149, "usage_type": "call"}, {"api_name": "os.path", "line_number": 149, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 150, "usage_type": "call"}, {"api_name": "logging.debug", "line_number": 152, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 154, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 154, "usage_type": "name"}, {"api_name": "tools.regen_apis.resource_generator.DiscoveryDoc.FromJson", "line_number": 175, "usage_type": "call"}, {"api_name": "tools.regen_apis.resource_generator.DiscoveryDoc", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tools.regen_apis.resource_generator", "line_number": 175, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "logging.warn", "line_number": 178, "usage_type": "call"}, {"api_name": "tools.regen_apis.resource_generator.DEFAULT_PATH_NAME", "line_number": 196, "usage_type": "attribute"}, {"api_name": "tools.regen_apis.resource_generator", "line_number": 196, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 204, "usage_type": "call"}, {"api_name": "os.path", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 206, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 208, "usage_type": "call"}, {"api_name": "mako.template.Template", "line_number": 211, "usage_type": "call"}, {"api_name": "mako.template", "line_number": 211, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 211, "usage_type": "call"}, {"api_name": "mako.runtime.Context", "line_number": 214, "usage_type": "call"}, {"api_name": "mako.runtime", "line_number": 214, "usage_type": "name"}]} +{"seq_id": "208529369", "text": "import serial\r\nimport psutil\r\nimport time\r\nimport socket\r\nimport threading\r\n\r\n\r\nHOST_NAME = socket.gethostname()\r\nIPv4 = socket.gethostbyname(HOST_NAME)\r\nPORT = 3090\r\nCLT = 0\r\nCONSOL = \"HOLA\"\r\n\r\ndef load_para():\r\n \r\n HN = HOST_NAME.split('-')[1]\r\n CPU = int(psutil.cpu_percent())\r\n RAM = int(psutil.virtual_memory().percent)\r\n HDD = int(psutil.disk_usage('C:\\\\').percent)\r\n\r\n MSG = \"PARA:DT-\" + str(HN) + ':' + IPv4 + ':' + str(PORT) + ':' + str(CPU) + ':' \\\r\n + str(RAM) + ':' + str(HDD) + ':' + str(CLT) + ':'\r\n \r\n return MSG\r\n\r\n\r\n\r\ndef DisplayLoop():\r\n time.sleep(1)\r\n while(True):\r\n time.sleep(1.1)\r\n Serial_COM1.write(load_para().encode('ascii'))\r\n time.sleep(1.1)\r\n val = \"CONSOL:\" + CONSOL\r\n Serial_COM1.write(val.encode('ascii'))\r\n \r\n \r\n \r\n\r\ntry:\r\n Serial_COM1 = serial.Serial(port = 'COM7',baudrate = 250000, timeout = 1 )\r\n\r\nexcept serial.SerialException:\r\n print(\"COM PORT NOT FOUND !\")\r\n exit()\r\n \r\n \r\nThread_01 = threading.Thread(target = 
DisplayLoop)\r\n\r\nSerial_COM1.write(\"OK\".encode('ascii'))\r\n\r\ntime.sleep(2)\r\n\r\nprint(HOST_NAME)\r\nprint(IPv4)\r\nprint(PORT)\r\n\r\nThread_01.start()\r\n\r\ni = 0 \r\nx = 0 \r\nwhile True:\r\n    x += 1 \r\n    CONSOL = str (x) \r\n    time.sleep(2)\r\n\r\n\r\n", "sub_path": "FAULT_IDENTIFICATION_BY_CONDITION_MONITORING-SK/CODE/SERVER/SCRYPT.py", "file_name": "SCRYPT.py", "file_ext": "py", "file_size_in_byte": 1305, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "socket.gethostname", "line_number": 8, "usage_type": "call"}, {"api_name": "socket.gethostbyname", "line_number": 9, "usage_type": "call"}, {"api_name": "psutil.cpu_percent", "line_number": 17, "usage_type": "call"}, {"api_name": "psutil.virtual_memory", "line_number": 18, "usage_type": "call"}, {"api_name": "psutil.disk_usage", "line_number": 19, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 31, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 41, "usage_type": "call"}, {"api_name": "serial.SerialException", "line_number": 43, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 48, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 52, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "399851657", "text": "# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% Change working directory from the workspace root to the ipynb file\n# location. Turn this addition off with the DataScience.\n# changeDirOnImportExport setting\n# ms-python.python added\nimport os\ntry:\n    os.chdir(os.path.join(os.getcwd(), 'week13/homework'))\n    print(os.getcwd())\nexcept:\n    pass\n# %%\nimport matplotlib.tri as tri\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom tqdm import tqdm\nimport time\n\ntime.perf_counter()\nstart = time.time()\n\n# set parameters\nn_samples = 10000\nn_step = 10000\n\n# generate data\n# initial condition\n# place a black hole at (0,0)\n# generate uniformly distributed particles inside a disk of r=10000 centered at (0,0)\n# initial particle velocities follow a normal distribution scaled by a factor of 5\nr = np.random.uniform(0, 10000, n_samples)\ntheta = np.random.uniform(0, 2*np.pi, n_samples)\nx = r*np.cos(theta)\ny = r*np.sin(theta)\nvx = np.random.normal(size=n_samples)*5\nvy = np.random.normal(size=n_samples)*5\n\n# random walk\nx_rand = []\ny_rand = []\nprint(\"Generating data>>>\")\nfor i in tqdm(range(n_step)):\n    x_rand.append(np.random.random(n_samples) *\n                  np.cos(np.random.uniform(0, 2*np.pi, n_samples)))\n    y_rand.append(np.random.random(n_samples) *\n                  np.sin(np.random.uniform(0, 2*np.pi, n_samples)))\n\n# calculate\nprint(\"Calculating>>>\")\nn = 0\nfor step in tqdm(range(n_step)):\n    r2 = x**2 + y**2\n    F = 100000 / r2**1.5\n    ax = -F * x\n    ay = -F * y\n    vx += ax\n    vy += ay\n    x += x_rand[step] + vx\n    y += y_rand[step] + vy\n\n# save each single picture every 50 step and later generate them into GIF\n'''\n    if (step % 50 == 0):\n        plt.figure(figsize=(10,10))\n        plt.plot([0],[0], 'ro', markersize=5)\n        plt.plot(x, y, 'bo', markersize=3, alpha=0.2)\n        plt.xlim(-n_samples , n_samples )\n        plt.ylim(-n_samples , n_samples )\n        plt.savefig('./fig/%d.png' % n)\n        plt.cla()\n        plt.close()\n        n = n+1\n'''\n\nend = time.time()\nelapse = end - start\nprint('total time: ' + str(elapse))\n# saveGif.main(n)\n\n\n# %%\n# Save gif\n'''\nimport imageio\n\n\ndef create_gif(image_list, 
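SCRYPT.py above polls psutil and frames the readings as a colon-delimited PARA string for the serial display. Building that payload is independent of the port; a sketch with the host, IP, and port fields as placeholders:

import psutil

cpu = int(psutil.cpu_percent())
ram = int(psutil.virtual_memory().percent)
hdd = int(psutil.disk_usage('/').percent)   # 'C:\\' on Windows, as in the record
payload = 'PARA:DT-01:192.168.0.10:3090:%d:%d:%d:0:' % (cpu, ram, hdd)
print(payload.encode('ascii'))   # bytes ready for Serial.write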
gif_name):\n\n    frames = []\n    for image_name in image_list:\n        frames.append(imageio.imread(image_name))\n    # Save them as frames into a gif\n    imageio.mimsave(gif_name, frames, 'GIF', duration=0.005)\n    print(\"Complete !\")\n    return\n\n\ndef main(n):\n    image_list = []\n    for i in range(n):\n        image_list.append('./fig/'+str(i) + '.png')\n    gif_name = 'created_gif'\n    create_gif(image_list, gif_name)\n\n\nif __name__ == \"__main__\":\n    main(n)\n\n'''\n# %%\n# plot the scatter and the hist\nprint(\"plotting scatter and hist...\")\nplt.figure(figsize=(8, 8))\ngrid = plt.GridSpec(4, 4, wspace=0.5, hspace=0.5)\n\nmain_ax = plt.subplot(grid[1:4, 0:3])\nplt.plot(x, y, 'bo', markersize=3, alpha=0.2)\nplt.xlim(-n_samples, n_samples)\nplt.ylim(-n_samples, n_samples)\n\ny_hist = plt.subplot(grid[1:4, 3], xticklabels=[], sharey=main_ax)\nplt.hist(y, 20, range=(-n_samples, n_samples),\n         orientation='horizontal', color='blue')\n\nx_hist = plt.subplot(grid[0, 0:3], xticklabels=[], sharex=main_ax)\nplt.hist(x, 20, range=(-n_samples, n_samples),\n         orientation='vertical', color='blue')\n\nplt.show()\n\n\n# %%\n# plot contour\nprint(\"plotting contour...\")\nv = (vx**2+vy**2)/2\ndata = np.concatenate(\n    [x.reshape(-1, 1), y.reshape(-1, 1), v.reshape(-1, 1)], axis=1)\nlen(data)\n\n# set x,y limit to 1000\nmask1 = abs(data[:, 0]) < 1000\nDATA = data[mask1]\nmask2 = abs(DATA[:, 1]) < 1000\nDATA = DATA[mask2]\nxi = DATA[:, 0]\nyi = DATA[:, 1]\nzi = DATA[:, 2]\n\n# Linear triangulation interpolate\ntriang = tri.Triangulation(xi, yi)\ninterpolator = tri.LinearTriInterpolator(triang, zi)\nX, Y = np.meshgrid(xi, yi)\nZ = interpolator(X, Y)\n\n\nplt.figure(figsize=(10, 6))\n# fill with color (the 'f' in contourf means filled)\n# plt.contourf(X,Y,Z)\n# draw contour lines\nplt.contour(X, Y, Z)\nplt.show()\n", "sub_path": "week13/homework/homework.py", "file_name": "homework.py", "file_ext": "py", "file_size_in_byte": 3896, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 10, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 20, "usage_type": "call"}, {"api_name": "time.time", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 32, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.random.normal", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 44, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 45, 
"usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.sin", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 47, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 52, "usage_type": "call"}, {"api_name": "time.time", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.GridSpec", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 127, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 130, "usage_type": "name"}, {"api_name": "numpy.concatenate", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.tri.Triangulation", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.tri", "line_number": 151, "usage_type": "name"}, {"api_name": "matplotlib.tri.LinearTriInterpolator", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.tri", "line_number": 152, "usage_type": "name"}, {"api_name": "numpy.meshgrid", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 162, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": 
"name"}]} +{"seq_id": "360034830", "text": "\nfrom insta import models\nfrom django.contrib.auth import login, authenticate\nfrom django.shortcuts import render, redirect\nfrom .forms import SignUpForm\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.models import User\nfrom .models import userprofile,userpost,postuploads\ndef signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n form.save()\n username = form.cleaned_data.get('username')\n raw_password = form.cleaned_data.get('password1')\n user = authenticate(username=username, password=raw_password)\n login(request, user)\n else:\n form = SignUpForm()\n return render(request, 'signup.html', {'form': form})\ndef myview(request):\n\tusername=request.user\n\tuser=User.objects.get(username=username)\n\tfollowercount=userprofile.objects.filter(following=username).count()\n\tfollowingcount=userprofile.objects.filter(followers=username).count()\n\tprofilepic=userpost.objects.filter(username=request.user)\n\treturn render (request,\"home.html\",{\"user\":user,\"followingcount\":followingcount,\"followercount\":followercount,\"profilepic\":profilepic})\n\ndef users(request):\n\tif request.method==\"POST\":\n\t\tuserdict=request.POST\n\t\tlistuser=[x for x in userdict.values()]\n\t\tselected=User.objects.get(username=listuser[1])\n\t\tuser=User.objects.get(username=request.user)\n\t\tif userprofile.objects.filter(followers=request.user,following=selected):\n\t\t\tfollowinglist=userprofile.objects.filter(followers=request.user,following=selected).delete()\n\t\t\t\n\t\telse:\n\t\t\tfollowinglist=userprofile.objects.get_or_create(followers=user,following=selected)\n\t\treturn redirect(\"http://127.0.0.1:8000/accounts/profile/users\")\n\tfollowinglist=userprofile.objects.filter(followers=request.user).values_list(\"following__username\",flat=True)\n\tprofilepic=userpost.objects.all().exclude(username=request.user)\n\treturn render (request,\"users.html\",{\"followinglist\":followinglist,\"profilepic\":profilepic})\n\t\ndef friendsprofile(request,username):\n\t\n\tselecteduser=User.objects.get(username=username)\n\tfollowercount=userprofile.objects.filter(following=selecteduser).count()\n\tfollowingcount=userprofile.objects.filter(followers=selecteduser).count()\n\tprofilepic=userpost.objects.filter(username=selecteduser)\n\treturn render (request,\"friendsprof.html\",{\"profilepic\":profilepic,\"selecteduser\":selecteduser,\"followingcount\":followingcount,\"followercount\":followercount})\n\ndef followers(request):\n\tu=userprofile.objects.filter(following=request.user).values_list(\"followers__username\",flat=True)\n\tif request.method==\"POST\":\n\t\tlistuser=[x for x in request.POST.values()][1]\n\t\tselected=User.objects.get(username=listuser)\n\t\tif userprofile.objects.filter(followers=request.user,following=selected):\n\t\t\tfollowinglist=userprofile.objects.filter(followers=request.user,following=selected).delete()\n\t\telse:\n\t\t\tfollowinglist=userprofile.objects.get_or_create(followers=request.user,following=selected)\n\t\treturn redirect(\"followers\")\n\tb=userprofile.objects.filter(followers=request.user).values_list(\"following__username\",flat=True)\n\tprofilepic=userpost.objects.all().exclude(username=request.user)\n\treturn render (request,\"followers.html\",{\"u\":u,\"b\":b,\"profilepic\":profilepic})\n\ndef following(request):\n\tif request.method==\"POST\":\n\t\tlistuser=[x for x in 
request.POST.values()][1]\n\t\tselected=User.objects.get(username=listuser)\n\t\tfollowinglist=userprofile.objects.filter(followers=request.user,following=selected).delete()\n\tprofilepic=userpost.objects.all().exclude(username=request.user)\n\tfollowinglist=userprofile.objects.filter(followers=request.user).values_list(\"following__username\",flat=True)\n\treturn render (request,\"following.html\",{\"followinglist\":followinglist,\"profilepic\":profilepic})\n\ndef friendsfollowers(request,username):\n\tif request.method==\"POST\":\n\t\tlistuser=[x for x in request.POST.values()][1]\n\t\tselected=User.objects.get(username=listuser)\n\t\tif userprofile.objects.filter(followers=request.user,following=selected):\n\t\t\tfollowinglist=userprofile.objects.filter(followers=request.user,following=selected).delete()\n\t\telse:\n\t\t\tfollowinglist=userprofile.objects.get_or_create(followers=request.user,following=selected)\n\n\tuse=User.objects.filter(username=request.user).values_list(\"username\",flat=True)\n\tselecteduser=User.objects.get(username=username)\n\tprofilepic=userpost.objects.all()\n\tu=userprofile.objects.filter(followers=request.user).exclude(following=request.user).values_list(\"following__username\",flat=True)\n\tfriendsfollowerlist=userprofile.objects.filter(following=selecteduser).values_list(\"followers__username\",flat=True)\n\treturn render (request,\"friendsfollowers.html\",{\"use\":use,\"u\":u,\"friendsfollowerlist\":friendsfollowerlist,\"profilepic\":profilepic})\n\ndef friendsfollowing(request,username):\n\tselecteduser=User.objects.get(username=username)\n\tfollowinglist=userprofile.objects.filter(followers=selecteduser).values_list(\"following__username\",flat=True)\n\treturn render (request,\"friendsfollowing.html\",{\"followinglist\":followinglist})\n\n\ndef uploads(request):\n\tif request.method==\"POST\":\n\t\tlistuser=[x for x in request.POST.values()][1]\n\t\tselected=User.objects.get(username=listuser)\n\t\tlikeslist=postuploads.objects.get_or_create(username=request.user,likes=selected)\n\t\n\n\tpost=postuploads.objects.all()\n\tu=userprofile.objects.filter(followers=request.user).values_list(\"following__username\",flat=True)\n\treturn render (request,\"posthome.html\",{\"u\":u,\"post\":post})", "sub_path": "insta/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5472, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "forms.SignUpForm", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.authenticate", "line_number": 16, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 17, "usage_type": "call"}, {"api_name": "forms.SignUpForm", "line_number": 19, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 23, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 23, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 24, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 24, "usage_type": "name"}, {"api_name": 
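Every follow/unfollow handler in the views above reduces to the same queryset toggle. A compact version of that idiom (model names follow the record; this assumes a configured Django project with the insta app installed):

from django.contrib.auth.models import User
from insta.models import userprofile

def toggle_follow(request_user, target_username):
    target = User.objects.get(username=target_username)
    qs = userprofile.objects.filter(followers=request_user, following=target)
    if qs.exists():
        qs.delete()   # already following: unfollow
    else:
        userprofile.objects.get_or_create(followers=request_user, following=target)

Using qs.exists() avoids evaluating the whole queryset just to test truthiness, which the record's bare `if userprofile.objects.filter(...)` form does implicitly.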
"models.userprofile.objects", "line_number": 25, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 25, "usage_type": "name"}, {"api_name": "models.userpost.objects.filter", "line_number": 26, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 26, "usage_type": "attribute"}, {"api_name": "models.userpost", "line_number": 26, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 27, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 33, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 34, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 35, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 35, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 35, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 36, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 36, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 36, "usage_type": "name"}, {"api_name": "models.userprofile.objects.get_or_create", "line_number": 39, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 40, "usage_type": "call"}, {"api_name": "models.userprofile.objects.filter", "line_number": 41, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 41, "usage_type": "name"}, {"api_name": "models.userpost.objects.all", "line_number": 42, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 42, "usage_type": "attribute"}, {"api_name": "models.userpost", "line_number": 42, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 47, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 47, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 48, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 48, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 48, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 49, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 49, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 49, "usage_type": "name"}, {"api_name": "models.userpost.objects.filter", "line_number": 50, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 50, "usage_type": "attribute"}, {"api_name": "models.userpost", 
"line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "models.userprofile.objects.filter", "line_number": 54, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 54, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 54, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 57, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 57, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 58, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 58, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 58, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 59, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 59, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 59, "usage_type": "name"}, {"api_name": "models.userprofile.objects.get_or_create", "line_number": 61, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 61, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 61, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "models.userprofile.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 63, "usage_type": "name"}, {"api_name": "models.userpost.objects.all", "line_number": 64, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "models.userpost", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 70, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 71, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 71, "usage_type": "name"}, {"api_name": "models.userpost.objects.all", "line_number": 72, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.userpost", "line_number": 72, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 73, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 79, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 79, 
"usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 80, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 80, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 80, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 81, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 81, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 81, "usage_type": "name"}, {"api_name": "models.userprofile.objects.get_or_create", "line_number": 83, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 83, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 83, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 85, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 85, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 85, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 86, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 86, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 86, "usage_type": "name"}, {"api_name": "models.userpost.objects.all", "line_number": 87, "usage_type": "call"}, {"api_name": "models.userpost.objects", "line_number": 87, "usage_type": "attribute"}, {"api_name": "models.userpost", "line_number": 87, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 88, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 88, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 88, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 89, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 89, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 89, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 90, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 93, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 93, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 94, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 94, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 94, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 95, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 101, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 101, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 101, "usage_type": "name"}, {"api_name": "models.postuploads.objects.get_orcreate", "line_number": 102, "usage_type": "call"}, {"api_name": "models.postuploads.objects", "line_number": 102, "usage_type": "attribute"}, {"api_name": "models.postuploads", "line_number": 102, "usage_type": "name"}, {"api_name": "models.postuploads.objects.all", "line_number": 105, "usage_type": "call"}, {"api_name": 
"models.postuploads.objects", "line_number": 105, "usage_type": "attribute"}, {"api_name": "models.postuploads", "line_number": 105, "usage_type": "name"}, {"api_name": "models.userprofile.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "models.userprofile.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "models.userprofile", "line_number": 106, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 107, "usage_type": "call"}]} +{"seq_id": "266832696", "text": "import requests\nimport xlsxwriter\nfrom bs4 import BeautifulSoup\n\n#Get the merchant that the user wants to scrape.\nurl = input(\"Paste your PM link here: \")\nr = requests.get(url)\nsoup = BeautifulSoup(r.text, 'lxml')\nsoup_string = str(soup)\n\nmerchant_name = str(soup.find('h1').text).replace(\" \", \"-\").strip('\\n')\nworkbook = xlsxwriter.Workbook(merchant_name + '-Pricing-List' + '.xlsx')\nworksheet = workbook.add_worksheet()\n \ndiv_name = soup.find_all(\"div\", class_=\"name\")\ndiv_prices = soup.find_all(\"div\", class_=\"price\")\nnum_of_names = (len(div_name))\nnum_of_price = (len(div_prices))\nstr_names = []\nstr_prices = []\n# Since when scraping the names and prices, a list is returned instead of integers and strings.\n# In order to convert all of that, we must iterate through all of the scraped info.\n\nfor i in range(0, num_of_names): #Iteration of the names.\n str_names.append(str(soup.find_all(\"div\", class_=\"name\")[i].text))\nfor i in range(0, num_of_price):\n str_prices.append(str(soup.find_all(\"div\", class_=\"price\")[i].text))\n\n\n# Write names to excel sheet.\nfor i in range(0, (len(str_names)-2)):\n worksheet.write_string(i, 0, str_names[i])\n# Write prices to excel sheet\nfor i in range(0, (len(str_prices))):\n worksheet.write_string(i, 1, str_prices[i])\n\nprint(\"\\nCheck your local folder for an excel sheet listing the items and prices.\")\nworkbook.close()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 7, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 8, "usage_type": "call"}, {"api_name": "xlsxwriter.Workbook", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "556794043", "text": "# -*- coding: utf-8 -*-\nimport scrapy\nfrom scrapy_splash import SplashRequest\nmain_url = \"https://www.glassdoor.ca\"\nclass ExampleSpider(scrapy.Spider):\n name = 'example'\n #allowed_domains = ['https://www.glassdoor.co.in/Job/canada-data-jobs-SRCH_IL.0,6_IN3_KE7,11.htm']\n start_urls = ['https://www.glassdoor.ca/Job/canada-data-jobs-SRCH_IL.0,6_IN3_KE7,11.htm']\n main_url = \"https://www.glassdoor.ca\"\n #handling javascript pages\n def start_requests(self):\n for url in self.start_urls:\n yield SplashRequest(url, self.parse,\n endpoint='render.html',\n args={'wait': 0.5},\n )\n \n def parse(self, response):\n urls = response.css('li.jl > div > div.flexbox > div > a::attr(href)').extract_first()\n urls = main_url + urls\n self.log(urls)\n for url in urls:\n yield scrapy.Request(url = url, callback = self.parse_details)\n \n\n def parse_details(self, response):\n \n if response.css('div[id = JobDescription] > span[id = TrackingJobBody] > ul'):\n yield { \n 'Job Post' : response.css('div.opening.col-sm-12 > h1::text').extract_first(),\n 'Location' : response.css('div.opening.col-sm-12 > h2::text').extract_first(),\n 
'Description' : \"\\n\".join(response.css('div[id = JobDescription] > span[id = TrackingJobBody] > ul > li::text').extract())\n }\n elif response.css('div[id = JobDescription] > span[id = TrackingJobBody]'):\n yield { \n 'Job Post' : response.css('div.opening.col-sm-12 > h1::text').extract_first(),\n 'Location' : response.css('div.opening.col-sm-12 > h2::text').extract_first(),\n 'Description' : \"\\n\".join(response.css('div[id = JobDescription] > span[id = TrackingJobBody]').xpath(\".//text()\").extract()),\n 'Description2' : \"\\n\".join(response.css('div[id = JobDescription] > span[id = TrackingJobBody]::text').extract())\n }", "sub_path": "web scraping/glassdoor data/spiders/example.py", "file_name": "example.py", "file_ext": "py", "file_size_in_byte": 2112, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.Spider", "line_number": 5, "usage_type": "attribute"}, {"api_name": "scrapy_splash.SplashRequest", "line_number": 13, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "575408896", "text": "import scipy.sparse\n\n\ndef save_sparse_matrix(x):\n row = x.row\n col = x.col\n data = x.data\n shape = x.shape\n return {'row':row, 'col':col, 'data':data, 'shape':shape}\n\n\ndef load_sparse_matrix(y):\n y = y.item()\n return scipy.sparse.coo_matrix((y['data'], (y['row'], y['col'])), shape=y['shape'])\n", "sub_path": "src/experiments/framework/sparse_io.py", "file_name": "sparse_io.py", "file_ext": "py", "file_size_in_byte": 315, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scipy.sparse.sparse.coo_matrix", "line_number": 14, "usage_type": "call"}, {"api_name": "scipy.sparse.sparse", "line_number": 14, "usage_type": "attribute"}, {"api_name": "scipy.sparse", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "154597643", "text": "from typing import List, Callable\nfrom statistics import mean\nimport pytest\n\nfrom httpx import AsyncClient\nfrom fastapi import FastAPI, status\n\n\nfrom app.models.cleaning import CleaningInDB\nfrom app.models.user import UserInDB\nfrom app.models.offer import OfferInDB\nfrom app.models.evaluation import EvaluationCreate, EvaluationInDB, EvaluationPublic, EvaluationAggregate\n\n\npytestmark = pytest.mark.asyncio\n\n\nclass TestEvaluationRoutes:\n async def test_routes_exist(self, app: FastAPI, client: AsyncClient) -> None:\n res = await client.post(\n app.url_path_for(\"evaluations:create-evaluation-for-cleaner\", cleaning_id=1, username=\"bradpitt\")\n )\n assert res.status_code != status.HTTP_404_NOT_FOUND\n\n res = await client.get(\n app.url_path_for(\"evaluations:get-evaluation-for-cleaner\", cleaning_id=1, username=\"bradpitt\")\n )\n assert res.status_code != status.HTTP_404_NOT_FOUND\n\n res = await client.get(app.url_path_for(\"evaluations:list-evaluations-for-cleaner\", username=\"bradpitt\"))\n assert res.status_code != status.HTTP_404_NOT_FOUND\n\n res = await client.get(app.url_path_for(\"evaluations:get-stats-for-cleaner\", username=\"bradpitt\"))\n assert res.status_code != status.HTTP_404_NOT_FOUND\n\n\nclass TestCreateEvaluations:\n async def test_owner_can_leave_evaluation_for_cleaner_and_mark_offer_completed(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user2: UserInDB,\n test_user3: UserInDB,\n test_cleaning_with_accepted_offer: CleaningInDB,\n ) -> None:\n evaluation_create = EvaluationCreate(\n 
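The save_sparse_matrix/load_sparse_matrix pair above (seq_id 575408896) round-trips a COO matrix through a plain dict. A minimal usage sketch, assuming the dict is persisted with numpy: np.save pickles the dict into a 0-d object array, which the .item() call inside load_sparse_matrix unwraps again, hence allow_pickle=True on load:

import numpy as np
import scipy.sparse

m = scipy.sparse.coo_matrix(np.eye(3))
np.save('m.npy', save_sparse_matrix(m))               # dict -> 0-d object array
restored = load_sparse_matrix(np.load('m.npy', allow_pickle=True))
assert (restored.toarray() == m.toarray()).all()      # lossless round trip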
no_show=False,\n headline=\"Excellent job\",\n comment=f\"\"\"\nReally appreciated the hard work and effort they put into this job!\nThough the cleaner took their time, I would definitely hire them again for the quality of their work.\n \"\"\",\n professionalism=5,\n completeness=5,\n efficiency=4,\n overall_rating=5,\n )\n\n authorized_client = create_authorized_client(user=test_user2)\n res = await authorized_client.post(\n app.url_path_for(\n \"evaluations:create-evaluation-for-cleaner\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user3.username,\n ),\n json={\"evaluation_create\": evaluation_create.dict()},\n )\n assert res.status_code == status.HTTP_201_CREATED\n evaluation = EvaluationInDB(**res.json())\n assert evaluation.no_show == evaluation_create.no_show\n assert evaluation.headline == evaluation_create.headline\n assert evaluation.overall_rating == evaluation_create.overall_rating\n\n # check that the offer has now been marked as \"completed\"\n res = await authorized_client.get(\n app.url_path_for(\n \"offers:get-offer-from-user\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user3.username,\n )\n )\n assert res.status_code == status.HTTP_200_OK\n assert res.json()[\"status\"] == \"completed\"\n\n async def test_non_owner_cant_leave_review(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user4: UserInDB,\n test_user3: UserInDB,\n test_cleaning_with_accepted_offer: CleaningInDB,\n ) -> None:\n authorized_client = create_authorized_client(user=test_user4)\n res = await authorized_client.post(\n app.url_path_for(\n \"evaluations:create-evaluation-for-cleaner\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user3.username,\n ),\n json={\"evaluation_create\": {\"overall_rating\": 2}},\n )\n assert res.status_code == status.HTTP_403_FORBIDDEN\n\n async def test_owner_cant_leave_review_for_wrong_user(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user2: UserInDB,\n test_user4: UserInDB,\n test_cleaning_with_accepted_offer: CleaningInDB,\n ) -> None:\n authorized_client = create_authorized_client(user=test_user2)\n res = await authorized_client.post(\n app.url_path_for(\n \"evaluations:create-evaluation-for-cleaner\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user4.username,\n ),\n json={\"evaluation_create\": {\"overall_rating\": 1}},\n )\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n\n async def test_owner_cant_leave_multiple_reviews(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user2: UserInDB,\n test_user3: UserInDB,\n test_cleaning_with_accepted_offer: CleaningInDB,\n ) -> None:\n authorized_client = create_authorized_client(user=test_user2)\n res = await authorized_client.post(\n app.url_path_for(\n \"evaluations:create-evaluation-for-cleaner\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user3.username,\n ),\n json={\"evaluation_create\": {\"overall_rating\": 3}},\n )\n assert res.status_code == status.HTTP_201_CREATED\n\n res = await authorized_client.post(\n app.url_path_for(\n \"evaluations:create-evaluation-for-cleaner\",\n cleaning_id=test_cleaning_with_accepted_offer.id,\n username=test_user3.username,\n ),\n json={\"evaluation_create\": {\"overall_rating\": 1}},\n )\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n\n\nclass TestGetEvaluations:\n \"\"\"\n Test that authenticated user who is not owner or cleaner can fetch a single evaluation\n Test that authenticated 
user can fetch all of a cleaner's evaluations\n Test that a cleaner's evaluations comes with an aggregate\n \"\"\"\n\n async def test_authenticated_user_can_get_evaluation_for_cleaning(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user3: UserInDB,\n test_user4: UserInDB,\n test_list_of_cleanings_with_evaluated_offer: List[CleaningInDB],\n ) -> None:\n authorized_client = create_authorized_client(user=test_user4)\n res = await authorized_client.get(\n app.url_path_for(\n \"evaluations:get-evaluation-for-cleaner\",\n cleaning_id=test_list_of_cleanings_with_evaluated_offer[0].id,\n username=test_user3.username,\n )\n )\n assert res.status_code == status.HTTP_200_OK\n evaluation = EvaluationPublic(**res.json())\n assert evaluation.cleaning_id == test_list_of_cleanings_with_evaluated_offer[0].id\n assert evaluation.cleaner_id == test_user3.id\n assert \"test headline\" in evaluation.headline\n assert \"test comment\" in evaluation.comment\n assert evaluation.professionalism >= 0 and evaluation.professionalism <= 5\n assert evaluation.completeness >= 0 and evaluation.completeness <= 5\n assert evaluation.efficiency >= 0 and evaluation.efficiency <= 5\n assert evaluation.overall_rating >= 0 and evaluation.overall_rating <= 5\n\n async def test_authenticated_user_can_get_list_of_evaluations_for_cleaner(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user3: UserInDB,\n test_user4: UserInDB,\n # test_list_of_cleanings_with_evaluated_offer: List[CleaningInDB],\n ) -> None:\n authorized_client = create_authorized_client(user=test_user4)\n res = await authorized_client.get(\n app.url_path_for(\"evaluations:list-evaluations-for-cleaner\", username=test_user3.username)\n )\n assert res.status_code == status.HTTP_200_OK\n evaluations = [EvaluationPublic(**e) for e in res.json()]\n assert len(evaluations) > 1\n for evaluation in evaluations:\n assert evaluation.cleaner_id == test_user3.id\n assert evaluation.overall_rating >= 0\n\n async def test_authenticated_user_can_get_aggregate_stats_for_cleaner(\n self,\n app: FastAPI,\n create_authorized_client: Callable,\n test_user3: UserInDB,\n test_user4: UserInDB,\n test_list_of_cleanings_with_evaluated_offer: List[CleaningInDB],\n ) -> None:\n authorized_client = create_authorized_client(user=test_user4)\n res = await authorized_client.get(\n app.url_path_for(\"evaluations:list-evaluations-for-cleaner\", username=test_user3.username)\n )\n assert res.status_code == status.HTTP_200_OK\n evaluations = [EvaluationPublic(**e) for e in res.json()]\n\n res = await authorized_client.get(\n app.url_path_for(\"evaluations:get-stats-for-cleaner\", username=test_user3.username)\n )\n assert res.status_code == status.HTTP_200_OK\n stats = EvaluationAggregate(**res.json())\n\n assert len(evaluations) == stats.total_evaluations\n assert max([e.overall_rating for e in evaluations]) == stats.max_overall_rating\n assert min([e.overall_rating for e in evaluations]) == stats.min_overall_rating\n assert mean([e.overall_rating for e in evaluations]) == stats.avg_overall_rating\n assert (\n mean([e.professionalism for e in evaluations if e.professionalism is not None]) == stats.avg_professionalism\n )\n assert mean([e.completeness for e in evaluations if e.completeness is not None]) == stats.avg_completeness\n assert mean([e.efficiency for e in evaluations if e.efficiency is not None]) == stats.avg_efficiency\n assert len([e for e in evaluations if e.overall_rating == 1]) == stats.one_stars\n assert len([e for e in evaluations if 
e.overall_rating == 2]) == stats.two_stars\n assert len([e for e in evaluations if e.overall_rating == 3]) == stats.three_stars\n assert len([e for e in evaluations if e.overall_rating == 4]) == stats.four_stars\n assert len([e for e in evaluations if e.overall_rating == 5]) == stats.five_stars\n\n async def test_unauthenticated_user_forbidden_from_get_requests(\n self,\n app: FastAPI,\n client: AsyncClient,\n test_user3: UserInDB,\n test_list_of_cleanings_with_evaluated_offer: List[CleaningInDB],\n ) -> None:\n res = await client.get(\n app.url_path_for(\n \"evaluations:get-evaluation-for-cleaner\",\n cleaning_id=test_list_of_cleanings_with_evaluated_offer[0].id,\n username=test_user3.username,\n )\n )\n assert res.status_code == status.HTTP_401_UNAUTHORIZED\n\n res = await client.get(\n app.url_path_for(\"evaluations:list-evaluations-for-cleaner\", username=test_user3.username)\n )\n assert res.status_code == status.HTTP_401_UNAUTHORIZED\n\n", "sub_path": "backend/tests/test_evaluations.py", "file_name": "test_evaluations.py", "file_ext": "py", "file_size_in_byte": 11073, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pytest.mark", "line_number": 15, "usage_type": "attribute"}, {"api_name": "fastapi.FastAPI", "line_number": 19, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 19, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 21, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 21, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 23, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 23, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 26, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 26, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 28, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 28, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 30, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 30, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 31, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 31, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 33, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 33, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_404_NOT_FOUND", "line_number": 34, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 34, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 40, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 41, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 42, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 43, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 44, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationCreate", "line_number": 46, "usage_type": "call"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 61, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 61, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_201_CREATED", "line_number": 68, "usage_type": "attribute"}, 
{"api_name": "fastapi.status", "line_number": 68, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationInDB", "line_number": 69, "usage_type": "call"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 76, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 76, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 82, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 82, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 88, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 89, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 90, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 91, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 95, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 95, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_403_FORBIDDEN", "line_number": 102, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 102, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 106, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 107, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 108, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 109, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 110, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 114, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 114, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_400_BAD_REQUEST", "line_number": 121, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 121, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 125, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 126, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 127, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 128, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 129, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 133, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 133, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_201_CREATED", "line_number": 140, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 140, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 143, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 143, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_400_BAD_REQUEST", "line_number": 150, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 150, "usage_type": "name"}, {"api_name": "fastapi.FastAPI", "line_number": 162, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 163, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 164, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 165, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 166, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 166, 
"usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 170, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 170, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 176, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 176, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationPublic", "line_number": 177, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 189, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 190, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 191, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 192, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 197, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 197, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 199, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 199, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationPublic", "line_number": 200, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 208, "usage_type": "name"}, {"api_name": "typing.Callable", "line_number": 209, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 210, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 211, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 212, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 212, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 216, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 216, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 218, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 218, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationPublic", "line_number": 219, "usage_type": "call"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 222, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 222, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_200_OK", "line_number": 224, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 224, "usage_type": "name"}, {"api_name": "app.models.evaluation.EvaluationAggregate", "line_number": 225, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 230, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 232, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 234, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 235, "usage_type": "call"}, {"api_name": "fastapi.FastAPI", "line_number": 244, "usage_type": "name"}, {"api_name": "httpx.AsyncClient", "line_number": 245, "usage_type": "name"}, {"api_name": "app.models.user.UserInDB", "line_number": 246, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 247, "usage_type": "name"}, {"api_name": "app.models.cleaning.CleaningInDB", "line_number": 247, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 250, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 250, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 256, "usage_type": "attribute"}, {"api_name": 
"fastapi.status", "line_number": 256, "usage_type": "name"}, {"api_name": "app.models.cleaning.url_path_for", "line_number": 259, "usage_type": "call"}, {"api_name": "app.models.cleaning", "line_number": 259, "usage_type": "name"}, {"api_name": "fastapi.status.HTTP_401_UNAUTHORIZED", "line_number": 261, "usage_type": "attribute"}, {"api_name": "fastapi.status", "line_number": 261, "usage_type": "name"}]} +{"seq_id": "260356475", "text": "# Copyright 2021 AI Singapore\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMain engine for Peekingduck processes\n\"\"\"\n\nimport copy\nimport sys\nimport logging\nfrom typing import List\nfrom peekingduck.pipeline.pipeline import Pipeline\nfrom peekingduck.declarative_loader import DeclarativeLoader\nfrom peekingduck.pipeline.nodes.node import AbstractNode\n\n\nclass Runner():\n \"\"\"\n The runner class for creation of pipeline using declared/given nodes.\n\n The runner class uses the provided configurations to setup a node pipeline\n which is used to run inference.\n\n Args:\n\n RUN_PATH (:obj:`str`): If path to a run_config.yml is provided, uses \\\n our declarative loader to load the yaml file according to our specified \\\n schema to obtain the declared nodes that would be sequentially \\\n initialized and used to create the pipeline for running inference. \\\n\n config_updates_cli (:obj:`str`): config changes passed as part of the \\\n cli command sed to modify the node configurations direct from cli.\n\n CUSTOM_NODE_PARENT_FOLDER (:obj:`str`): path to folder which contains \\\n custom nodes that users have created to be used with PeekingDuck. 
\\\n For more information on using custom nodes, please refer to \\\n `getting started `_.\n\n nodes (:obj:`list` of :obj:`Node`): if not using declarations via yaml, \\\n initialize by giving the node stack directly as a list.\n\n \"\"\"\n\n def __init__(self,\n RUN_PATH: str = \"\",\n config_updates_cli: str = None,\n CUSTOM_NODE_PARENT_FOLDER: str = None,\n nodes: List[AbstractNode] = None):\n\n self.logger = logging.getLogger(__name__)\n\n if not nodes and RUN_PATH:\n # create Graph to run\n self.node_loader = DeclarativeLoader(\n RUN_PATH, config_updates_cli, CUSTOM_NODE_PARENT_FOLDER) # type: ignore\n\n self.pipeline = self.node_loader.get_pipeline()\n\n # If Runner given nodes, instantiated_nodes is created differently\n else:\n try:\n self.pipeline = Pipeline(nodes) # type: ignore\n except ValueError as error:\n self.logger.error(str(error))\n sys.exit(1)\n\n def run(self) -> None:\n \"\"\"execute single or continuous inference\n \"\"\"\n while not self.pipeline.terminate:\n for node in self.pipeline.nodes:\n if \"pipeline_end\" in self.pipeline.data and \\\n self.pipeline.data[\"pipeline_end\"]: # type: ignore\n\n self.pipeline.terminate = True\n if \"pipeline_end\" not in node.inputs:\n continue\n\n if \"all\" in node.inputs:\n inputs = copy.deepcopy(self.pipeline.data)\n else:\n inputs = {key: self.pipeline.data[key]\n for key in node.inputs if key in self.pipeline.data}\n\n outputs = node.run(inputs)\n self.pipeline.data.update(outputs) # type: ignore\n\n def get_run_config(self) -> List[str]:\n \"\"\"retrieve run configs\n\n Returns:\n (:obj:`Dict`: run configs being used for runner\n \"\"\"\n return self.node_loader.node_list\n", "sub_path": "peekingduck/runner.py", "file_name": "runner.py", "file_ext": "py", "file_size_in_byte": 3871, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.List", "line_number": 59, "usage_type": "name"}, {"api_name": "peekingduck.pipeline.nodes.node.AbstractNode", "line_number": 59, "usage_type": "name"}, {"api_name": "logging.getLogger", "line_number": 61, "usage_type": "call"}, {"api_name": "peekingduck.declarative_loader.DeclarativeLoader", "line_number": 65, "usage_type": "call"}, {"api_name": "peekingduck.pipeline.pipeline.Pipeline", "line_number": 73, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 76, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 91, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 99, "usage_type": "name"}]} +{"seq_id": "255603597", "text": "import re\nfrom functools import reduce\n\nclass SCSSExpand():\n def __init__(self, startpos, get_char_fn, separator = ' '):\n self.selectors = []\n self.comment_blocks = [] #array of tuples /*123*/ - will give (0, 6) - inclusive!\n self.separator = separator\n self.get_char_fn = get_char_fn\n self.startpos = startpos\n\n def coalesce_rule(self):\n self.comment_machine(self.startpos)\n if self.check_block_comment(self.startpos):\n self.startpos = self.skip_comment(self.startpos)\n self.selector_machine(self.startpos)\n self.process_at_root()\n\n selector_array = [x for x in self.selectors if not re.search('@(for|each|while|if|else)', x)]\n selector_array = list(map(self.process_selector, selector_array))\n\n ### Past this point are mostly differences in formatting\n # If loop directive information must be retained,\n # modify the filter above\n self.generate_expanded(selector_array)\n return self.strip_whitespace((',' + 
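Runner.run() in the record above (seq_id 260356475) threads a single dict through the node list, feeding each node only the keys it declares in inputs and merging its outputs back in. A toy illustration of that piping pattern, with two stand-in node classes that are illustrative only and not the PeekingDuck node API:

class Doubler:
    inputs, outputs = ["value"], ["doubled"]
    def run(self, inputs):
        return {"doubled": inputs["value"] * 2}

class Printer:
    inputs, outputs = ["doubled"], []
    def run(self, inputs):
        print(inputs["doubled"])   # -> 42
        return {}

data = {"value": 21}
for node in (Doubler(), Printer()):
    # feed each node only the keys it asks for, then merge its outputs back
    data.update(node.run({k: data[k] for k in node.inputs if k in data}))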
self.separator).join(self.selectors))\n\n # We read forwards for comment blocks\n def comment_machine(self, endpos):\n startpos = 0\n endpos -= 1\n commentstart = None\n commentend = None\n while startpos <= endpos:\n char = self.get_char_fn(startpos)\n\n # Single line comments\n if char == '/' and self.forward_lookahead(startpos) == '/':\n commentstart = startpos\n while char != '\\n':\n if startpos == endpos + 1:\n self.comment_blocks.append((commentstart, endpos))\n return\n startpos += 1\n char = self.get_char_fn(startpos)\n commentend = startpos\n self.comment_blocks.append((commentstart, commentend))\n\n # Block comments\n elif char == '/' and self.forward_lookahead(startpos) == '*':\n commentstart = startpos\n startpos += 2 # move past the opening block so that /*/ is still commented\n char = self.get_char_fn(startpos)\n while char != '*' or self.forward_lookahead(startpos) != '/':\n if startpos == endpos:\n self.comment_blocks.append((commentstart, endpos + 1))\n return\n startpos += 1\n char = self.get_char_fn(startpos)\n\n startpos += 1\n commentend = startpos\n self.comment_blocks.append((commentstart, commentend))\n\n startpos += 1\n\n def skip_comment(self, pos):\n for commentstart, commentend in self.comment_blocks:\n if pos == commentend:\n return commentstart - 1\n return pos\n\n def selector_machine(self, cursorpos):\n position = self.push_next_selector(cursorpos)\n while position >= 0:\n position = self.push_next_selector(position)\n self.selectors = self.selectors[::-1]\n\n def push_next_selector(self, startpos):\n bracket_counter = 0\n\n while bracket_counter > -1 and startpos >= 0:\n char = self.get_char_fn(startpos)\n if char == '/' and self.lookahead(startpos) == '*':\n startpos = self.skip_comment(startpos)\n elif char == '{':\n is_comment, startpos = self.check_comment(startpos)\n if not is_comment:\n bracket_counter -= 1\n elif char == '}':\n is_comment, startpos = self.check_comment(startpos)\n if not is_comment:\n bracket_counter += 1\n startpos -= 1\n\n # handle the case of interpolation\n if (bracket_counter < 0 and self.get_char_fn(startpos) != '#'):\n self.gather_selector(startpos)\n\n return startpos\n\n def forward_lookahead(self, pos):\n return self.get_char_fn(pos + 1)\n\n def lookahead(self, pos):\n return self.get_char_fn(pos - 1)\n\n def gather_selector(self, openposition):\n selector = ''\n selectorposition = openposition\n char = self.get_char_fn(selectorposition)\n\n while char != ';' and char != '{' and selectorposition >= 0:\n if char == '\\n' or (char == '/' and self.lookahead(selectorposition) == '*'):\n selectorposition = self.skip_comment(selectorposition)\n char = self.get_char_fn(selectorposition)\n\n if char == '}':\n stringbuffer = '}'\n selectorposition -= 1\n\n while char != '{' and selectorposition >= 0:\n char = self.get_char_fn(selectorposition)\n stringbuffer += char\n selectorposition -= 1\n\n if char == '{' and self.get_char_fn(selectorposition) == '#':\n selector += stringbuffer\n char = self.get_char_fn(selectorposition)\n else:\n break\n\n if selectorposition >= 0:\n selector += char\n selectorposition -= 1\n char = self.get_char_fn(selectorposition)\n\n if len(selector) > 0:\n selector = self.strip_whitespace(selector)\n self.selectors.append(selector[::-1])\n\n # Returns whether the line is a comment and\n # the number of the first character of the comment ('/')\n # [True, 32]\n # If it is not a commented line, return the same selectorpos\n def check_comment(self, selectorposition):\n return 
[self.check_block_comment(selectorposition), self.skip_comment(selectorposition)]\n\n # Returns False if the startpos is not in a block comment,\n # returns True if it is\n def check_block_comment(self, selectorposition):\n for commentstart, commentend in self.comment_blocks:\n if selectorposition >= commentstart and selectorposition <= commentend:\n return True\n return False\n\n def process_at_root(self):\n selectors = self.selectors\n # Group 1: with/without\n # Group 2: space-separated list of with/without directives\n AT_ROOT_RE = re.compile(r'@at-root\\s*(?:\\((with|without)\\s*:\\s*((?:\\w+\\s?)+)\\))?\\s*')\n at_root_index = None\n at_root = dict() # only the final at-root is important\n for index, selector in enumerate(selectors):\n at_root_match = re.search(AT_ROOT_RE, selector)\n if at_root_match:\n at_root_index = index\n at_root['exclusion'] = at_root_match.group(1)\n if at_root_match.group(2):\n at_root['directives'] = at_root_match.group(2).split(' ')\n else:\n at_root['directives'] = []\n\n if not at_root_index:\n return\n\n directive_re = re.compile('@(' + '|'.join(at_root['directives']) +')')\n\n # @at-root has two special values, 'all' and 'rule'\n if at_root['exclusion'] == 'without' and 'all' in at_root['directives']:\n selectors[at_root_index] = re.sub(AT_ROOT_RE, '', selectors[at_root_index])\n self.selectors = selectors[at_root_index:] #discard everything\n return\n elif at_root['exclusion'] == 'with' and 'rule' in at_root['directives']:\n # keep listed directives and all rules\n filter_func = lambda x: re.match(directive_re, x) or not re.match('@', x)\n elif at_root['exclusion'] == 'with':\n # keep only listed directives\n filter_func = lambda x: re.match(directive_re, x)\n elif at_root['exclusion'] == None:\n # just keep directives\n filter_func = lambda x: re.match('@', x)\n elif at_root['exclusion'] == 'without':\n # keep directives but not those listed. without rule is in this case\n filter_func = lambda x: re.match('@', x) and not re.match(directive_re, x)\n\n allowed_directives = list(filter(filter_func, selectors[:at_root_index]))\n selectors[at_root_index] = re.sub(AT_ROOT_RE, '', selectors[at_root_index])\n self.selectors = allowed_directives + selectors[at_root_index:]\n\n def process_selector(self, selector):\n split = []\n if '@' in selector:\n split = [selector]\n else:\n split = selector.split(',')\n return split\n\n # selector array goes forward\n # generate_expanded takes an array of arrays and joins them together\n # e.g. 
[['.hello', '.there'], ['.one', '.two']]\n # gives ['.hello .one', '.hello .two', '.there .one', '.there .two']\n def generate_expanded(self, selector_array):\n\n def comma_reducer(array, following_array):\n results = []\n for selector in array:\n for sel in following_array:\n stripped_selector = self.strip_whitespace(selector)\n if '&' in sel:\n results.append(sel.replace('&', stripped_selector))\n else:\n results.append(stripped_selector + self.separator + self.strip_whitespace(sel))\n return results\n\n self.selectors = reduce(comma_reducer, selector_array)\n\n def strip_whitespace(self, selector):\n return selector.strip()\n", "sub_path": "src/src_three/scss_expand.py", "file_name": "scss_expand.py", "file_ext": "py", "file_size_in_byte": 8251, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "re.search", "line_number": 19, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 161, "usage_type": "call"}, {"api_name": "re.search", "line_number": 165, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 177, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 181, "usage_type": "call"}, {"api_name": "re.match", "line_number": 186, "usage_type": "call"}, {"api_name": "re.match", "line_number": 189, "usage_type": "call"}, {"api_name": "re.match", "line_number": 192, "usage_type": "call"}, {"api_name": "re.match", "line_number": 195, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 198, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 226, "usage_type": "call"}]} +{"seq_id": "401973910", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 09 21:20:23 2017\r\n\r\n@author: Zimu\r\n\"\"\"\r\n\r\n# import dicom # for reading dicom files\r\nimport os # for doing directory operations\r\nimport pandas as pd # for some simple data analysis (right now, just to load in the labels data and quickly reference it)\r\nimport numpy as np\r\nimport scipy.ndimage\r\nimport scipy.misc\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image as img\r\nfrom skimage import measure#, morphology\r\n#from skimage.segmentation import slic\r\nfrom mpl_toolkits.mplot3d.art3d import Poly3DCollection\r\nimport math\r\nimport tensorflow as tf\r\nfrom six.moves import xrange\r\n\r\n\r\nEVAL_BATCH_SIZE = 1\r\nIMG_SIZE_PX = 64\r\nSLICE_COUNT = 24\r\nn_classes = 2\r\nprocessData = False\r\nkeep_rate = 0.8\r\n#Below is code to load a scan, which consists of multiple slices, which we simply save in a Python list.\r\n#Every folder in the dataset is one scan (so one patient). One metadata field is missing, the pixel size in the Z direction,\r\n#which is the slice thickness. 
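The reduce-based cross join in generate_expanded (seq_id 255603597) can be read in isolation; a standalone sketch of the same comma_reducer pattern, omitting the parent-selector (&) substitution that the real method also performs:

from functools import reduce

def cross_join(selector_array, separator=' '):
    def comma_reducer(acc, nxt):
        # every selector accumulated so far is combined with every follower
        return [a.strip() + separator + b.strip() for a in acc for b in nxt]
    return reduce(comma_reducer, selector_array)

print(cross_join([['.hello', '.there'], ['.one', '.two']]))
# ['.hello .one', '.hello .two', '.there .one', '.there .two']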
Fortunately we can infer this, and we add this to the metadata\r\n\r\ndef load_scan(path):\r\n slices = [dicom.read_file(path + '/' + s) for s in os.listdir(path)]\r\n slices.sort(key = lambda x: float(x.ImagePositionPatient[2]))\r\n try:\r\n slice_thickness = np.abs(slices[0].ImagePositionPatient[2] - slices[1].ImagePositionPatient[2])\r\n except:\r\n slice_thickness = np.abs(slices[0].SliceLocation - slices[1].SliceLocation)\r\n\r\n for s in slices:\r\n s.SliceThickness = slice_thickness\r\n#\r\n return slices\r\n\r\n#Some scanners have cylindrical scanning bounds, but the output image is square.\r\n#The pixels that fall outside of these bounds get the fixed value -2000.\r\n#The first step is setting these values to 0, which currently corresponds to air.\r\n#Next, let's go back to HU units, by multiplying with the rescale slope and adding the intercept\r\n#(which are conveniently stored in the metadata of the scans!).\r\ndef get_pixels_hu(slices):\r\n# credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n image = np.stack([s.pixel_array for s in slices])\r\n # Convert to int16 (from sometimes int16),\r\n # should be possible as values should always be low enough (<32k)\r\n image = image.astype(np.int16)\r\n\r\n # Set outside-of-scan pixels to 0\r\n # The intercept is usually -1024, so air is approximately 0\r\n image[image == -2000] = 0\r\n\r\n # Convert to Hounsfield units (HU)\r\n for slice_number in range(len(slices)):\r\n\r\n intercept = slices[slice_number].RescaleIntercept\r\n slope = slices[slice_number].RescaleSlope\r\n\r\n if slope != 1:\r\n image[slice_number] = slope * image[slice_number].astype(np.float64)\r\n image[slice_number] = image[slice_number].astype(np.int16)\r\n\r\n image[slice_number] += np.int16(intercept)\r\n\r\n return np.array(image, dtype=np.int16)\r\n\r\n# A scan may have a pixel spacing of [2.5, 0.5, 0.5], which means that the distance between slices is 2.5 millimeters.\r\n# For a different scan this may be [1.5, 0.725, 0.725], this can be problematic for automatic analysis (e.g. 
using ConvNets)!\r\n# A common method of dealing with this is resampling the full dataset to a certain isotropic resolution.\r\n# If we choose to resample everything to 1mm1mm1mm pixels we can use 3D convnets without worrying about\r\n# learning zoom/slice thickness invariance.\r\n\r\ndef resample(image, scan, new_spacing=[1,1,1]):\r\n # Determine current pixel spacing\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n spacing = np.array([scan[0].SliceThickness] + scan[0].PixelSpacing, dtype=np.float32)\r\n\r\n resize_factor = spacing / new_spacing\r\n new_real_shape = image.shape * resize_factor\r\n new_shape = np.round(new_real_shape)\r\n real_resize_factor = new_shape / image.shape\r\n new_spacing = spacing / real_resize_factor\r\n\r\n image = scipy.ndimage.interpolation.zoom(image, real_resize_factor, mode='nearest')\r\n\r\n return image\r\n\r\ndef plot_3d(image, threshold=-300):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n # Position the scan upright,\r\n # so the head of the patient would be at the top facing the camera\r\n p = image.transpose(2,1,0)\r\n\r\n verts, faces = measure.marching_cubes(p, threshold)\r\n\r\n fig = plt.figure(figsize=(10, 10))\r\n ax = fig.add_subplot(111, projection='3d')\r\n\r\n # Fancy indexing: `verts[faces]` to generate a collection of triangles\r\n mesh = Poly3DCollection(verts[faces], alpha=0.70)\r\n face_color = [0.45, 0.45, 0.75]\r\n mesh.set_facecolor(face_color)\r\n ax.add_collection3d(mesh)\r\n\r\n ax.set_xlim(0, p.shape[0])\r\n ax.set_ylim(0, p.shape[1])\r\n ax.set_zlim(0, p.shape[2])\r\n\r\n plt.show()\r\n\r\n#Threshold the image (-320 HU is a good threshold, but it doesn't matter much for this approach)\r\n#Do connected components, determine label of air around person, fill this with 1s in the binary image\r\n#Optionally: For every axial slice in the scan, determine the largest solid connected component (the body+air around the person), and set others to 0. 
This fills the structures in the lungs in the mask.\r\n#Keep only the largest air pocket (the human body has other pockets of air here and there).\r\ndef largest_label_volume(im, bg=-1):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n vals, counts = np.unique(im, return_counts=True)\r\n\r\n counts = counts[vals != bg]\r\n vals = vals[vals != bg]\r\n\r\n if len(counts) > 0:\r\n return vals[np.argmax(counts)]\r\n else:\r\n return None\r\n\r\ndef segment_lung_mask(image, fill_lung_structures=True):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n # not actually binary, but 1 and 2.\r\n # 0 is treated as background, which we do not want\r\n binary_image = np.array(image > -320, dtype=np.int8)+1\r\n labels = measure.label(binary_image)\r\n\r\n # Pick the pixel in the very corner to determine which label is air.\r\n # Improvement: Pick multiple background labels from around the patient\r\n # More resistant to \"trays\" on which the patient lays cutting the air\r\n # around the person in half\r\n background_label = labels[0,0,0]\r\n\r\n #Fill the air around the person\r\n binary_image[background_label == labels] = 2\r\n\r\n\r\n # Method of filling the lung structures (that is superior to something like\r\n # morphological closing)\r\n if fill_lung_structures:\r\n # For every slice we determine the largest solid structure\r\n for i, axial_slice in enumerate(binary_image):\r\n axial_slice = axial_slice - 1\r\n labeling = measure.label(axial_slice)\r\n l_max = largest_label_volume(labeling, bg=0)\r\n\r\n if l_max is not None: #This slice contains some lung\r\n binary_image[i][labeling != l_max] = 1\r\n\r\n\r\n binary_image -= 1 #Make the image actual binary\r\n binary_image = 1-binary_image # Invert it, lungs are now 1\r\n\r\n # Remove other air pockets insided body\r\n labels = measure.label(binary_image, background=0)\r\n l_max = largest_label_volume(labels, bg=0)\r\n if l_max is not None: # There are air pockets\r\n binary_image[labels != l_max] = 0\r\n\r\n return binary_image\r\n\r\n\r\n\r\n\r\ndef normalize(image,MIN_BOUND = -1000.0,MAX_BOUND = 400.0):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n image = (image - MIN_BOUND) / (MAX_BOUND - MIN_BOUND)\r\n image[image>1] = 1.\r\n image[image<0] = 0.\r\n return image\r\n\r\n\r\ndef zero_center(image,PIXEL_MEAN = 0.25):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n image = image - PIXEL_MEAN\r\n return image\r\ndef chunks(l, n,HM_SLICES = 20):\r\n # Credit: Ned Batchelder\r\n # Link: http://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks\r\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\r\n count=0\r\n for i in range(0, len(l), n):\r\n if(count < HM_SLICES):\r\n yield l[i:i + n]\r\n count=count+1\r\n\r\ndef mean(l):\r\n # credit to Guido Zuidhof https://www.kaggle.com/gzuidhof/data-science-bowl-2017/full-preprocessing-tutorial\r\n return sum(l) / len(l)\r\ndef process_data(patient,labels_df,data_dir,img_px_size=50, hm_slices=20, visualize=False, superPixels = 100):\r\n\r\n #lung segmentation\r\n label = labels_df.get_value(patient, 'cancer')\r\n path = data_dir + patient\r\n slices = load_scan(path)\r\n if (len(slices)==0):\r\n print(\"No Data\")\r\n return ([0, 0])\r\n slices.sort(key = lambda x: int(x.ImagePositionPatient[2]))\r\n 
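A quick numeric check of the preprocessing helpers defined above (seq_id 401973910), reusing normalize, zero_center, chunks and mean as written there. With the default window of -1000..400 HU, air maps to 0.0 and anything above 400 HU clips to 1.0:

import numpy as np

hu = np.array([-1000.0, -400.0, 400.0, 700.0])
print(normalize(hu))               # [0.  0.4286  1.  1.] after clipping
print(zero_center(normalize(hu)))  # same values shifted by the assumed 0.25 mean

# slice averaging as used in process_data: 10 slices -> 5 chunks of 2, then mean
print([mean(c) for c in chunks(list(range(10)), 2, HM_SLICES=5)])
# [0.5, 2.5, 4.5, 6.5, 8.5]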
slices_pixels = get_pixels_hu(slices)\r\n\r\n segmented_lungs_fill = segment_lung_mask(np.asarray(slices_pixels), True)\r\n# superPixelImage = slic(segmented_lungs_fill,n_segments=superPixels,compactness = 0.04, multichannel = False)\r\n# pix_resampled = resample(slices_pixels, slices, [1,1,1])\r\n newSlicesPixels = []\r\n for each_slice in range(0,segmented_lungs_fill.shape[0]):\r\n# superPixelImage = slic(segmented_lungs_fill[each_slice],n_segments=superPixels,compactness = 0.04, multichannel = False)\r\n# superPixelImage = scipy.ndimage.filters.gaussian_filter(superPixelImage,.5)\r\n# superPixMax = (np.amax(superPixelImage[each_slice]))\r\n# im = img.fromarray(np.uint8(superPixelImage/superPixMax)*255).resize((img_px_size,img_px_size))\r\n# im = np.fromstring(im.tobytes(),dtype=np.uint8)/255*superPixMax.reshape(img_px_size,img_px_size)\r\n im = img.fromarray(np.uint8(segmented_lungs_fill[each_slice])*255).resize((img_px_size,img_px_size))\r\n im = (np.fromstring(im.tobytes(),dtype=np.uint8)/255).reshape(img_px_size,img_px_size)\r\n newSlicesPixels.append(im)\r\n\r\n newSlicesPixels = np.asarray(newSlicesPixels)\r\n\r\n new_slices = []\r\n chunk_sizes = math.floor(newSlicesPixels.shape[0] / hm_slices)\r\n for slice_chunk in chunks(newSlicesPixels, int(chunk_sizes),hm_slices):\r\n slice_chunk = list(map(mean, zip(*slice_chunk)))\r\n new_slices.append(slice_chunk)\r\n\r\n if visualize:\r\n fig = plt.figure()\r\n for num,each_slice in enumerate(new_slices):\r\n y = fig.add_subplot(4,5,num+1)\r\n y.imshow(each_slice, cmap='gray')\r\n plt.show()\r\n\r\n if label == 1: label=np.array([0,1])\r\n elif label == 0: label=np.array([1,0])\r\n\r\n return np.array(new_slices),label\r\n\r\n#tf.nn.conv3d(input, filter, strides, padding, name=None)\r\ndef batchnorm_layer(Ylogits, is_test, Offset, Scale, iteration, convolutional=False):\r\n#credit to Martin Gorner https://github.com/martin-gorner/tensorflow-mnist-tutorial/blob/master/mnist_4.2_batchnorm_convolutional.py\r\n exp_moving_avg = tf.train.ExponentialMovingAverage(0.9999,iteration)\r\n if convolutional:\r\n mean, variance = tf.nn.moments(Ylogits, [0, 1, 2, 3])\r\n else:\r\n mean, variance = tf.nn.moments(Ylogits, [0])\r\n update_moving_averages = exp_moving_avg.apply([mean, variance])\r\n m = tf.cond(is_test, lambda: exp_moving_avg.averge(mean), lambda: mean)\r\n v = tf.cond(is_test, lambda: exp_moving_avg.average(variance), lambda:variance)\r\n Ybn = tf.nn.batch_normalization(Ylogits,m,v,Offset,Scale,variance_epsilon=1e-5)\r\n return Ybn, update_moving_averages\r\ndef conv3d(x, W, padding='Same'):\r\n return tf.nn.conv3d(x, W, strides=[1,1,1,1,1], padding='SAME')\r\n#tf.nn.conv3d_transpose(value, filter, output_shape, strides, padding='SAME', name=None)\r\ndef conv3dT(x, W, outputShape, padding='Same'):\r\n return tf.nn.conv3d_transpose(x, W, outputShape,strides=[1,2,2,2,1], padding='SAME')\r\n\r\ndef maxpool3d(x):\r\n # size of window movement of window as you slide about\r\n return tf.nn.max_pool3d(x, ksize=[1,2,2,2,1], strides=[1,2,2,2,1], padding='SAME')\r\ndef convolutional_neural_network(x,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate):\r\n#test convolutional network. 
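The 3-D helpers above follow TF 1.x conventions (tf.placeholder, NDHWC tensors), matching this record's code. A small shape check under those assumptions: SAME-padded stride-1 convolution preserves the spatial dimensions, and the stride-2 pooling halves them:

import tensorflow as tf

x = tf.placeholder(tf.float32, [1, 64, 64, 24, 1])    # N, X, Y, Z, channels
w = tf.Variable(tf.random_normal([3, 3, 3, 1, 32]))
c = conv3d(x, w)
print(c.shape)              # (1, 64, 64, 24, 32)
print(maxpool3d(c).shape)   # (1, 32, 32, 12, 32)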
Two layers with FC\r\n# Credit to sentdex for providing tutorial at https://www.kaggle.com/sentdex/data-science-bowl-2017/first-pass-through-data-w-3d-convnet\r\n weights = {'W_conv1':tf.Variable(tf.random_normal([3,3,3,1,32])),\r\n # 5 x 5 x 5 patches, 32 channels, 64 features to compute.\r\n 'W_conv2':tf.Variable(tf.random_normal([3,3,3,32,64])),\r\n # 64 features\r\n 'W_fc':tf.Variable(tf.random_normal([98304,1024])),\r\n 'out':tf.Variable(tf.random_normal([1024, n_classes]))}\r\n\r\n biases = {'b_conv1':tf.Variable(tf.random_normal([32])),\r\n 'b_conv2':tf.Variable(tf.random_normal([64])),\r\n 'b_fc':tf.Variable(tf.random_normal([1024])),\r\n 'out':tf.Variable(tf.random_normal([n_classes]))}\r\n\r\n # image X image Y image Z\r\n x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])\r\n\r\n conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1'])\r\n conv1 = maxpool3d(conv1)\r\n\r\n\r\n conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2'])\r\n conv2 = maxpool3d(conv2)\r\n\r\n fc = tf.reshape(conv2,[-1, 98304])\r\n fc = tf.nn.relu(tf.matmul(fc, weights['W_fc'])+biases['b_fc'])\r\n fc = tf.nn.dropout(fc, keep_rate)\r\n\r\n output = tf.matmul(fc, weights['out'])+biases['out']\r\n\r\n return output\r\n\r\ndef ClipIfNotNone(grad):\r\n# Credit to azni at http://stackoverflow.com/questions/39295136/gradient-clipping-appears-to-choke-on-none for solving none gradient problem\r\n if grad is None:\r\n return grad\r\n return tf.clip_by_value(grad, -1, 1)\r\n # Small utility function to evaluate a dataset by feeding batches of data to\r\n # {eval_data} and pulling the results from {eval_predictions}.\r\n # Saves memory and enables this to run on smaller GPUs.\r\n\r\ndef UConvNet(x,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate):\r\n # 3 x 3 x 3 patches, 1 channel, 32 features to compute.\r\n# Based off the paper 3D U-Net: Learning Dense Volumetric\r\n# Segmentation from Sparse Annotation\r\n# by Ozgun Cicek, Ahmed Abdulkadir, Soeren S. Lienkamp\r\n# available at https://arxiv.org/pdf/1606.06650.pdf\r\n #set standard deviation to 1/sqrt(x*y*z*input Channels),\r\n# resulting in overall standard deviation of root((1+Fraction of Inputs = =1)/2)\r\n# for weights. 
Maintain N(0,1) for biases based off of recommendations from this paper\r\n# Practical Recommendations for Gradient-Based Training of Deep Architectures\r\n# by Yoshua Bengio available herehttps://arxiv.org/pdf/1206.5533v2.pdf\r\n#\r\n weights = {'W_conv1':tf.Variable(tf.random_normal([3,3,3,1,32],stddev=1/math.sqrt(27))),\r\n # 5 x 5 x 5 patches, 32 channels, 64 features to compute.\r\n 'W_conv2':tf.Variable(tf.random_normal([3,3,3,32,64],stddev=1/math.sqrt(864))),\r\n # 64 features\r\n 'W_conv2.5':tf.Variable(tf.random_normal([3,3,3,64,64],stddev=1/math.sqrt(27*64))),\r\n 'W_conv3':tf.Variable(tf.random_normal([3,3,3,64,128],stddev=1/math.sqrt(27*64))),\r\n 'W_conv3.5':tf.Variable(tf.random_normal([3,3,3,128,128],stddev=1/math.sqrt(27*128))),\r\n 'W_conv4':tf.Variable(tf.random_normal([3,3,3,128,256],stddev=1/math.sqrt(27*128))),\r\n 'W_conv4.5':tf.Variable(tf.random_normal([3,3,3,256,256],stddev=1/math.sqrt(27*256))),\r\n 'W_conv5':tf.Variable(tf.random_normal([3,3,3,256,512],stddev=1/math.sqrt(27*256))),\r\n 'W_convT1':tf.Variable(tf.random_normal([3,3,3,512,512],stddev=1/math.sqrt(27*512))),\r\n 'W_conv6':tf.Variable(tf.random_normal([3,3,3,768,256],stddev=1/math.sqrt(27*768))),\r\n 'W_conv7':tf.Variable(tf.random_normal([3,3,3,256,256],stddev=1/math.sqrt(27*256))),\r\n 'W_convT2':tf.Variable(tf.random_normal([3,3,3,256,256],stddev=1/math.sqrt(27*256))),\r\n 'W_conv8':tf.Variable(tf.random_normal([3,3,3,384,128],stddev=1/math.sqrt(27*384))),\r\n 'W_conv9':tf.Variable(tf.random_normal([3,3,3,128,128],stddev=1/math.sqrt(27*128))),\r\n 'W_convT3':tf.Variable(tf.random_normal([3,3,3,128,128],stddev=1/math.sqrt(27*128))),\r\n 'W_conv10':tf.Variable(tf.random_normal([3,3,3,192,64],stddev=1/math.sqrt(27*192))),\r\n 'W_conv11':tf.Variable(tf.random_normal([3,3,3,64,64],stddev=1/math.sqrt(27*64))),\r\n 'W_convfinal':tf.Variable(tf.random_normal([3,3,3,64,n_classes],stddev=1/math.sqrt(27*64))),\r\n 'W_fc':tf.Variable(tf.random_normal([196608,1024],stddev=1/math.sqrt(196608))),\r\n 'out':tf.Variable(tf.random_normal([1024, n_classes],stddev=1/math.sqrt(1024)))\r\n }\r\n\r\n biases = {'b_conv1':tf.Variable(tf.random_normal([32])),\r\n 'b_conv2':tf.Variable(tf.random_normal([64])),\r\n 'b_conv2.5':tf.Variable(tf.random_normal([64])),\r\n 'b_conv3':tf.Variable(tf.random_normal([128])),\r\n 'b_conv3.5':tf.Variable(tf.random_normal([128])),\r\n 'b_conv4':tf.Variable(tf.random_normal([256])),\r\n 'b_conv4.5':tf.Variable(tf.random_normal([256])),\r\n 'b_conv5':tf.Variable(tf.random_normal([512])),\r\n 'b_convT1':tf.Variable(tf.random_normal([512])),\r\n 'b_conv6':tf.Variable(tf.random_normal([256])),\r\n 'b_conv7':tf.Variable(tf.random_normal([256])),\r\n 'b_convT2':tf.Variable(tf.random_normal([256])),\r\n 'b_conv8':tf.Variable(tf.random_normal([128])),\r\n 'b_conv9':tf.Variable(tf.random_normal([128])),\r\n 'b_convT3':tf.Variable(tf.random_normal([128])),\r\n 'b_conv10':tf.Variable(tf.random_normal([64])),\r\n 'b_conv11':tf.Variable(tf.random_normal([64])),\r\n 'b_convfinal':tf.Variable(tf.random_normal([2])),\r\n 'b_fc':tf.Variable(tf.random_normal([1024])),\r\n 'out':tf.Variable(tf.random_normal([n_classes]))}\r\n\r\n\r\n # image X image Y image Z\r\n x = tf.reshape(x, shape=[-1, IMG_SIZE_PX, IMG_SIZE_PX, SLICE_COUNT, 1])\r\n\r\n conv1 = tf.nn.relu(conv3d(x, weights['W_conv1']) + biases['b_conv1']) #32\r\n conv2 = tf.nn.relu(conv3d(conv1, weights['W_conv2']) + biases['b_conv2']) #64\r\n conv2p = maxpool3d(conv2)\r\n\r\n conv3 = tf.nn.relu(conv3d(conv2p, weights['W_conv2.5']) + biases['b_conv2.5']) 
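Every stddev in the weights dict above instantiates the same fan-in rule, 1/sqrt(kx*ky*kz*in_channels), once per layer. Written a single time as a hypothetical helper for readability:

import math

def fan_in_stddev(kx, ky, kz, in_channels):
    # hypothetical helper: 1/sqrt(fan-in) for a kx*ky*kz kernel
    return 1 / math.sqrt(kx * ky * kz * in_channels)

print(fan_in_stddev(3, 3, 3, 32))   # == 1/math.sqrt(864), as used for W_conv2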
#64\r\n conv4 = tf.nn.relu(conv3d(conv3, weights['W_conv3']) + biases['b_conv3']) #128\r\n conv4p = maxpool3d(conv4)\r\n\r\n conv5 = tf.nn.relu(conv3d(conv4p, weights['W_conv3.5']) + biases['b_conv3.5']) #128\r\n conv6 = tf.nn.relu(conv3d(conv5, weights['W_conv4']) + biases['b_conv4']) #256\r\n conv6p = maxpool3d(conv6)\r\n\r\n conv7 = tf.nn.relu(conv3d(conv6p, weights['W_conv4.5']) + biases['b_conv4.5']) #256\r\n conv8 = tf.nn.relu(conv3d(conv7, weights['W_conv5']) + biases['b_conv5']) #512\r\n\r\n# conv6Shape = tf.shape(conv6)\r\n#\r\n# smoother1 =tf.cond(tf.equal(tf.mod(conv6Shape[1], tf2),tf1), lambda: 1, lambda: 0)\r\n# smoother2 =tf.cond(tf.equal(tf.mod(conv6Shape[2], tf2),tf1), lambda: 1, lambda: 0)\r\n# smoother3 =tf.cond(tf.equal(tf.mod(conv6Shape[3], tf2),tf1), lambda: 1, lambda: 0)\r\n\r\n outPutShape = [1,16,16,6,512]\r\n\r\n conv9 = tf.nn.relu(conv3dT(conv8, weights['W_convT1'],outPutShape) + biases['b_convT1'])\r\n\r\n conv9 = tf.concat([conv6,conv9],4)\r\n\r\n conv10 = tf.nn.relu(conv3d(conv9, weights['W_conv6']) + biases['b_conv6']) #256\r\n conv11 = tf.nn.relu(conv3d(conv10, weights['W_conv7']) + biases['b_conv7']) #256\r\n\r\n# conv4Shape = tf.shape(conv6)\r\n\r\n# smoother1 =tf.cond(tf.equal(tf.mod(conv4Shape[1], tf2 ),tf1), lambda: 1, lambda: 0)\r\n# smoother2 =tf.cond(tf.equal(tf.mod(conv4Shape[2], tf2 ),tf1), lambda: 1, lambda: 0)\r\n# smoother3 =tf.cond(tf.equal(tf.mod(conv4Shape[3], tf2 ),tf1), lambda: 1, lambda: 0)\r\n\r\n outPutShape = [1,32,32,12,256]\r\n conv12 = tf.nn.relu(conv3dT(conv11, weights['W_convT2'],outPutShape) + biases['b_convT2'])\r\n conv12 = tf.concat([conv4,conv12], 4)\r\n\r\n conv13 = tf.nn.relu(conv3d(conv12, weights['W_conv8']) + biases['b_conv8']) #128\r\n conv14 = tf.nn.relu(conv3d(conv13, weights['W_conv9']) + biases['b_conv9']) #128\r\n\r\n# conv2Shape = tf.shape(conv6)\r\n\r\n# smoother1 =tf.cond(tf.equal(tf.mod(conv2Shape[1], tf2 ),tf1), lambda: 1, lambda: 0)\r\n# smoother2 =tf.cond(tf.equal(tf.mod(conv2Shape[2], tf2 ),tf1), lambda: 1, lambda: 0)\r\n# smoother3 =tf.cond(tf.equal(tf.mod(conv2Shape[3], tf2 ),tf1), lambda: 1, lambda: 0)\r\n#\r\n outPutShape = [1,64,64,24,128]\r\n\r\n conv15 = tf.nn.relu(conv3dT(conv14, weights['W_convT3'],outPutShape) + biases['b_convT3'])\r\n conv15 = tf.concat([conv2,conv15], 4)\r\n\r\n conv16 = tf.nn.relu(conv3d(conv15, weights['W_conv10']) + biases['b_conv10']) #64\r\n conv17 = tf.nn.relu(conv3d(conv16, weights['W_conv11']) + biases['b_conv11']) #64\r\n conv18 = tf.nn.relu(conv3d(conv17, weights['W_convfinal']) + biases['b_convfinal']) #2\r\n# output = tf.reshape(conv18,[n_classes,-1])\r\n fc = tf.reshape(conv18,[-1, 196608])\r\n\r\n fc = tf.nn.relu(tf.matmul(fc, weights['W_fc'])+biases['b_fc'])\r\n fc = tf.nn.dropout(fc, keep_rate)\r\n\r\n output = tf.matmul(fc, weights['out'])+biases['out']\r\n\r\n return output\r\ndef train_neural_network(x,y,train_data,validation_data,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate):\r\n prediction = convolutional_neural_network(x,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate)\r\n# cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\r\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )\r\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3).minimize(cost)\r\n\r\n hm_epochs = 10\r\n with tf.Session() as sess:\r\n print(\"initializing sess\")\r\n sess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n successful_runs = 0\r\n total_runs = 0\r\n print(\"beginning runs\")\r\n for 
epoch in range(hm_epochs):\r\n epoch_loss = 0\r\n thisRun = 0\r\n for data in train_data:\r\n\r\n total_runs += 1\r\n try:\r\n# if epoch == 0:\r\n# Z = data[0]\r\n# ZY = data[1]\r\n X = data[0]\r\n Y = data[1]\r\n _, c = sess.run([optimizer, cost], feed_dict={x: X, y: Y})\r\n\r\n epoch_loss += c\r\n successful_runs += 1\r\n thisRun += 1\r\n\r\n if thisRun % 100 == 0:\r\n print(str(thisRun) + \" out of \" + str(len(train_data)))\r\n print(\"run:\"+str(thisRun)+\" with epoch_loss\"+str(epoch_loss) + \"with loss c:\" + str(c))\r\n elif thisRun %10 == 0:\r\n print(\"run:\"+str(thisRun)+\" with epoch_loss\"+str(epoch_loss) + \"with loss c:\" + str(c))\r\n saver.save(sess, 'unet-model',global_step = successful_runs)\r\n except Exception as e:\r\n # I am passing for the sake of notebook space, but we are getting 1 shaping issue from one\r\n # input tensor. Not sure why, will have to look into it. Guessing it's\r\n # one of the depths that doesn't come to 20.\r\n pass\r\n #print(str(e))\r\n\r\n print('Epoch', epoch+1, 'completed out of',hm_epochs,'loss:',epoch_loss)\r\n\r\n# correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n# accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n#\r\n# print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))\r\n\r\n print('Done. Finishing accuracy:')\r\n# print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))\r\n\r\n print('fitment percent:',successful_runs/total_runs)\r\ndef eval_in_batches(data, sess,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate):\r\n\r\n x = tf.placeholder('float')\r\n \"\"\"Get all predictions for a dataset by running it in small batches.\"\"\"\r\n size = data.shape[0]\r\n if size < EVAL_BATCH_SIZE:\r\n raise ValueError(\"batch size for evals larger than dataset: %d\" % size)\r\n predictions = np.ndarray(shape=(size, n_classes), dtype=np.float32)\r\n for begin in xrange(0, size):\r\n inputX = data[begin]\r\n eval_prediction = tf.nn.softmax(UConvNet(x,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate))\r\n sess.run(tf.global_variables_initializer())\r\n predictions[begin] = sess.run(eval_prediction,feed_dict={x: inputX})\r\n return predictions\r\ndef error_rate(predictions, labels):\r\n \"\"\"Return the error rate based on dense predictions and sparse labels.\"\"\"\r\n accurate = 0\r\n for index in range(0,predictions.shape[0]):\r\n if np.argmax(predictions[index]) == np.argmax(labels[index]):\r\n accurate = accurate + 1\r\n return(1-accurate/predictions.shape[0])\r\n\r\ndef train_unet(x,y,train_data,validation_data,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate):\r\n prediction = UConvNet(x,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate)\r\n# cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(prediction,y) )\r\n# apply gradient clipping to eliminate gradient explosion problem\r\n cost = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y) )\r\n optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)\r\n gradients = optimizer.compute_gradients(cost)\r\n\r\n capped_gvs = [(ClipIfNotNone(grad), var) for grad, var in gradients]\r\n train_op = optimizer.apply_gradients(capped_gvs)\r\n hm_epochs = 10\r\n\r\n\r\n with tf.Session() as sess:\r\n print(\"initializing sess\")\r\n sess.run(tf.global_variables_initializer())\r\n saver = tf.train.Saver()\r\n successful_runs = 0\r\n total_runs = 0\r\n print(\"beginning runs\")\r\n for epoch in range(hm_epochs):\r\n error = 0\r\n epoch_loss = 0\r\n thisRun = 0\r\n for 
data in train_data:\r\n\r\n total_runs += 1\r\n try:\r\n X = data[0]\r\n Y = data[1]\r\n _, c = sess.run([train_op, cost], feed_dict={x: X, y: Y})\r\n\r\n epoch_loss += c\r\n# test_error = error_rate(eval_in_batches(testX, sess,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate), testY)\r\n# print(test_error)\r\n successful_runs += 1\r\n thisRun += 1\r\n if thisRun % 100 == 0:\r\n print(str(thisRun) + \" out of \" + str(len(train_data)))\r\n print(\"run:\"+str(thisRun)+\" with epoch_loss\"+str(epoch_loss) + \"with loss c:\" + str(c))\r\n saver.save(sess, './unetmodel')\r\n elif thisRun %5 == 0:\r\n print(\"run:\"+str(thisRun)+\" with epoch_loss\"+str(epoch_loss) + \"with loss c:\" + str(c))\r\n saver.save(sess, './unetmodel')\r\n except Exception as e:\r\n # I am passing for the sake of notebook space, but we are getting 1 shaping issue from one\r\n # input tensor. Not sure why, will have to look into it. Guessing it's\r\n # one of the depths that doesn't come to 20.\r\n error += 1\r\n if error % 100 == 0:\r\n print(\"error:\"+str(error) + \" out of \" + str(len(train_data)))\r\n pass\r\n #print(str(e))\r\n saver.save(sess, './unetmodel')\r\n print(\"error:\"+str(error))\r\n print(\"successful_runs:\"+str(successful_runs))\r\n print('Epoch', epoch+1, 'completed out of',hm_epochs,'loss:',epoch_loss)\r\n saver.save(sess, 'unet-model',global_step = successful_runs)\r\n# correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\r\n# accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\r\n#\r\n# print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))\r\n\r\n print('Done. Finishing accuracy:')\r\n# print('Accuracy:',accuracy.eval({x:[i[0] for i in validation_data], y:[i[1] for i in validation_data]}))\r\n\r\n print('fitment percent:',successful_runs/total_runs)\r\n\r\ndef main():\r\n\r\n\r\n\r\n# At this point, we've got the list of patients by their IDs, and their associated labels stored in a dataframe.\r\n# Now, we can begin to iterate through the patients and gather their respective data.\r\n# We're almost certainly going to need to do some preprocessing of this data, but we'll see.\r\n\r\n x = tf.placeholder('float')\r\n y = tf.placeholder('float')\r\n\r\n data_dir = 'C:/Users/Zimu/Desktop/Project/TrainingData/'\r\n if processData:\r\n data_dir = 'D:/S2/stage1/'\r\n\r\n patients = os.listdir(data_dir)\r\n patients.sort()\r\n patients = os.listdir(data_dir)\r\n labels = pd.read_csv('C:/Users/Zimu/Desktop/Project/stage1_labels.csv', index_col=0)\r\n numSuperPixels=100\r\n much_data = []\r\n for num,patient in enumerate(patients):\r\n print(str(num) + \":\" + patient)\r\n if num % 100 == 0:\r\n print(str(num) + \":\" + patient)\r\n try:\r\n img_data,label = process_data(patient,labels,data_dir, img_px_size=IMG_SIZE_PX, hm_slices=SLICE_COUNT, superPixels = numSuperPixels)\r\n # np.save('imgData-{}-{}-{}-{}.npy'.format(IMG_SIZE_PX,IMG_SIZE_PX,SLICE_COUNT,patient), img_data)\r\n #print(img_data.shape,label)\r\n if (1-np.isscalar(img_data)):\r\n much_data.append([img_data,label])\r\n except KeyError as e:\r\n print('This is unlabeled data!')\r\n np.save('alldata3-{}-{}-{}.npy'.format(IMG_SIZE_PX,IMG_SIZE_PX,SLICE_COUNT), much_data)\r\n\r\n much_data1 = np.load('../alldata3-64-64-24.npy')\r\n\r\n train_data1 = much_data1[:-300]\r\n validation_data1 = much_data1[-300:]\r\n\r\n\r\n\r\n\r\n# train_neural_network(x,y,train_data1,validation_data1,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate)\r\n 
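# --- Editor's addition (hedged sketch, not in the original script): print one\r\n    # training sample's shape and label as a quick sanity check before training;\r\n    # np is already imported by this file and much_data1 entries are [image, label].\r\n    sample_img, sample_label = train_data1[0]\r\n    print('sample image shape:', np.asarray(sample_img).shape)\r\n    print('sample label:', sample_label);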
train_unet(x,y,train_data1,validation_data1,n_classes,IMG_SIZE_PX,SLICE_COUNT,keep_rate)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n\tmain()\r\n", "sub_path": "Unet.py", "file_name": "Unet.py", "file_ext": "py", "file_size_in_byte": 30932, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.listdir", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.float64", "line_number": 70, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 71, "usage_type": "attribute"}, {"api_name": "numpy.int16", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.int16", "line_number": 75, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.ndimage.ndimage.interpolation.zoom", "line_number": 94, "usage_type": "call"}, {"api_name": "scipy.ndimage.ndimage", "line_number": 94, "usage_type": "attribute"}, {"api_name": "scipy.ndimage", "line_number": 94, "usage_type": "name"}, {"api_name": "skimage.measure.marching_cubes", "line_number": 104, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 104, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "mpl_toolkits.mplot3d.art3d.Poly3DCollection", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "numpy.unique", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.int8", "line_number": 141, "usage_type": "attribute"}, {"api_name": "skimage.measure.label", "line_number": 142, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 142, "usage_type": "name"}, {"api_name": "skimage.measure.label", "line_number": 160, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 160, "usage_type": "name"}, {"api_name": "skimage.measure.label", "line_number": 171, "usage_type": "call"}, {"api_name": "skimage.measure", "line_number": 171, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 218, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 228, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 228, "usage_type": "call"}, {"api_name": "numpy.fromstring", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 229, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 232, "usage_type": "call"}, {"api_name": "math.floor", "line_number": 235, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 241, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 241, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 245, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 248, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}, {"api_name": "tensorflow.train.ExponentialMovingAverage", "line_number": 255, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 255, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.moments", "line_number": 257, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 257, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.moments", "line_number": 259, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 259, "usage_type": "attribute"}, {"api_name": "tensorflow.cond", "line_number": 261, "usage_type": "call"}, {"api_name": "tensorflow.cond", "line_number": 262, "usage_type": "call"}, {"api_name": "tensorflow.nn.batch_normalization", "line_number": 263, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 263, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.conv3d", "line_number": 266, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 266, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.conv3d_transpose", "line_number": 269, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 269, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.max_pool3d", "line_number": 273, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 273, "usage_type": "attribute"}, {"api_name": "tensorflow.Variable", "line_number": 277, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 277, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 279, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 281, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 282, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 284, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 284, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 285, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 286, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 286, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 287, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 290, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 292, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 292, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 296, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 296, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 299, "usage_type": "call"}, {"api_name": 
"tensorflow.nn.relu", "line_number": 300, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 300, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 300, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 301, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 301, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 303, "usage_type": "call"}, {"api_name": "tensorflow.clip_by_value", "line_number": 311, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 328, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 328, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 328, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 330, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 330, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 330, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 332, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 332, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 332, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 333, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 333, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 333, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 334, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 334, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 334, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 335, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 335, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 335, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 336, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 336, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 337, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 337, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 337, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 338, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 338, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 339, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 339, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 339, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 340, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 340, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 340, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 341, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 341, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 341, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 342, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 342, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 342, "usage_type": 
"call"}, {"api_name": "tensorflow.Variable", "line_number": 343, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 343, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 343, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 344, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 344, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 344, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 345, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 345, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 345, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 346, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 346, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 346, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 347, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 347, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 347, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 348, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 348, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 348, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 349, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 349, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 349, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 352, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 352, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 353, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 353, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 354, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 354, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 355, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 355, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 356, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 356, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 357, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 357, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 358, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 358, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 359, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 359, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 360, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 360, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 361, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 361, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 362, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 362, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 363, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", 
"line_number": 363, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 364, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 364, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 365, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 366, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 366, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 367, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 367, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 368, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 368, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 369, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 369, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 370, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 370, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 371, "usage_type": "call"}, {"api_name": "tensorflow.random_normal", "line_number": 371, "usage_type": "call"}, {"api_name": "tensorflow.reshape", "line_number": 375, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 377, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 377, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 378, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 378, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 381, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 381, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 382, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 382, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 385, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 385, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 386, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 386, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 389, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 389, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 390, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 390, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 400, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 400, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 402, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 404, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 404, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 405, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 405, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 414, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 414, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 415, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 417, "usage_type": "call"}, {"api_name": 
"tensorflow.nn", "line_number": 417, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 418, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 418, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 428, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 428, "usage_type": "attribute"}, {"api_name": "tensorflow.concat", "line_number": 429, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 431, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 431, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 432, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 432, "usage_type": "attribute"}, {"api_name": "tensorflow.nn.relu", "line_number": 433, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 433, "usage_type": "attribute"}, {"api_name": "tensorflow.reshape", "line_number": 435, "usage_type": "call"}, {"api_name": "tensorflow.nn.relu", "line_number": 437, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 437, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 437, "usage_type": "call"}, {"api_name": "tensorflow.nn.dropout", "line_number": 438, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 438, "usage_type": "attribute"}, {"api_name": "tensorflow.matmul", "line_number": 440, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 446, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 446, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 446, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 447, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 447, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 450, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 452, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 453, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 453, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 501, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 506, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 506, "usage_type": "attribute"}, {"api_name": "six.moves.xrange", "line_number": 507, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax", "line_number": 509, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 509, "usage_type": "attribute"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 510, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 517, "usage_type": "call"}, {"api_name": "tensorflow.reduce_mean", "line_number": 525, "usage_type": "call"}, {"api_name": "tensorflow.nn.softmax_cross_entropy_with_logits", "line_number": 525, "usage_type": "call"}, {"api_name": "tensorflow.nn", "line_number": 525, "usage_type": "attribute"}, {"api_name": "tensorflow.train.AdamOptimizer", "line_number": 526, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 526, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 534, "usage_type": "call"}, {"api_name": "tensorflow.global_variables_initializer", "line_number": 536, "usage_type": "call"}, {"api_name": "tensorflow.train.Saver", "line_number": 
537, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 537, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 597, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 598, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 604, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 606, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 607, "usage_type": "call"}, {"api_name": "numpy.isscalar", "line_number": 618, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 622, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 624, "usage_type": "call"}]} +{"seq_id": "98785222", "text": "import pandas as pd\r\nfrom collections import Counter\r\nimport sys\r\nsys.path.append('../speech-accent-recognition/src')\r\nimport getsplit\r\nfrom keras import utils\r\nimport accuracy\r\nimport multiprocessing\r\nimport librosa\r\nimport numpy as np\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nfrom keras.models import Sequential\r\nfrom keras.layers.core import Dense, Dropout, Flatten\r\nfrom keras.layers.convolutional import MaxPooling2D, Conv2D\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom keras.callbacks import EarlyStopping, TensorBoard\r\nfrom keras.models import load_model\r\nimport argparse\r\nprint(librosa.__version__)\r\n\r\n\r\nDEBUG = True\r\nSILENCE_THRESHOLD = .01\r\nRATE = 24000\r\nN_MFCC = 30\r\nCOL_SIZE = 30\r\nEPOCHS = 10 #35#250\r\n\r\ndef to_categorical(y):\r\n    '''\r\n    Converts list of languages into a binary class matrix\r\n    :param y (list): list of languages\r\n    :return (numpy array): binary class matrix\r\n    '''\r\n    lang_dict = {}\r\n    for index,language in enumerate(set(y)):\r\n        lang_dict[language] = index\r\n    y = list(map(lambda x: lang_dict[x],y))\r\n    return utils.to_categorical(y, len(lang_dict))\r\n\r\ndef get_wav(language_num):\r\n    '''\r\n    Load wav file from disk and down-samples to RATE\r\n    :param language_num (list): list of file names\r\n    :return (numpy array): Down-sampled wav file\r\n    '''\r\n\r\n    y, sr = librosa.load('../audio/{}.wav'.format(language_num))\r\n    return(librosa.core.resample(y=y,orig_sr=sr,target_sr=RATE, scale=True))\r\n\r\ndef to_mfcc(wav):\r\n    '''\r\n    Converts wav file to Mel Frequency Cepstral Coefficients\r\n    :param wav (numpy array): Wav form\r\n    :return (2d numpy array): MFCC\r\n    '''\r\n    return(librosa.feature.mfcc(y=wav, sr=RATE, n_mfcc=13))\r\n\r\ndef remove_silence(wav, thresh=0.04, chunk=5000):\r\n    '''\r\n    Searches wav form for segments of silence. 
If wav form values are lower than 'thresh' for 'chunk' samples, the values will be removed\r\n    :param wav (np array): Wav array to be filtered\r\n    :return (np array): Wav array with silence removed\r\n    '''\r\n\r\n    tf_list = []\r\n    for x in range(len(wav) // chunk):\r\n        if (np.any(wav[chunk * x:chunk * (x + 1)] >= thresh) or np.any(wav[chunk * x:chunk * (x + 1)] <= -thresh)):\r\n            tf_list.extend([True] * chunk)\r\n        else:\r\n            tf_list.extend([False] * chunk)\r\n\r\n    tf_list.extend((len(wav) - len(tf_list)) * [False])\r\n    return(wav[tf_list])\r\n\r\ndef normalize_mfcc(mfcc):\r\n    '''\r\n    Normalize mfcc\r\n    :param mfcc:\r\n    :return:\r\n    '''\r\n    mms = MinMaxScaler()\r\n    return(mms.fit_transform(np.abs(mfcc)))\r\n\r\ndef make_segments(mfccs,labels):\r\n    '''\r\n    Makes segments of mfccs and attaches them to the labels\r\n    :param mfccs: list of mfccs\r\n    :param labels: list of labels\r\n    :return (tuple): Segments with labels\r\n    '''\r\n    segments = []\r\n    seg_labels = []\r\n    for mfcc,label in zip(mfccs,labels):\r\n        for start in range(0, int(mfcc.shape[1] / COL_SIZE)):\r\n            segments.append(mfcc[:, start * COL_SIZE:(start + 1) * COL_SIZE])\r\n            seg_labels.append(label)\r\n    return(segments, seg_labels)\r\n\r\ndef segment_one(mfcc):\r\n    '''\r\n    Creates segments from one MFCC image; any trailing segment shorter than COL_SIZE columns is dropped\r\n    :param mfcc (numpy array): MFCC array\r\n    :return (numpy array): Segmented MFCC array\r\n    '''\r\n    segments = []\r\n    for start in range(0, int(mfcc.shape[1] / COL_SIZE)):\r\n        segments.append(mfcc[:, start * COL_SIZE:(start + 1) * COL_SIZE])\r\n    return(np.array(segments))\r\n\r\ndef create_segmented_mfccs(X_train):\r\n    '''\r\n    Creates segmented MFCCs from X_train\r\n    :param X_train: list of MFCCs\r\n    :return: segmented mfccs\r\n    '''\r\n    segmented_mfccs = []\r\n    for mfcc in X_train:\r\n        segmented_mfccs.append(segment_one(mfcc))\r\n    return(segmented_mfccs)\r\n\r\nmodel=load_model('model5.h5')\r\n\r\n\r\ndef get_wav1(language_num):\r\n    '''\r\n    Load wav file from disk and down-samples to RATE\r\n    :param language_num (list): list of file names\r\n    :return (numpy array): Down-sampled wav file\r\n    '''\r\n    print(language_num)\r\n    y, sr = librosa.load('audio_ORG/'+language_num)\r\n    return(librosa.core.resample(y=y,orig_sr=sr,target_sr=RATE, scale=True))\r\n\r\n\r\n\r\ndef audioname(audiofile):\r\n    global fname\r\n    fname=audiofile\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n    name=[]\r\n    \r\n    name=[fname]\r\n    pool = multiprocessing.Pool(processes=1)\r\n    print(multiprocessing.cpu_count())\r\n    X_train = pool.map(get_wav1,name)\r\n    print(X_train[0])\r\n    X_train = pool.map(to_mfcc, X_train)\r\n    #X_train=pool.map(normalize_mfcc,X_train)\r\n\r\n    y_predicted = accuracy.predict_class_all(create_segmented_mfccs(X_train), model)\r\n    print(y_predicted)\r\n    return(y_predicted)\r\n    ", "sub_path": "test.py", "file_name": "test.py", "file_ext": "py", "file_size_in_byte": 4797, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "librosa.__version__", "line_number": 21, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 41, "usage_type": "call"}, {"api_name": "keras.utils", "line_number": 41, "usage_type": "name"}, {"api_name": "librosa.load", "line_number": 50, "usage_type": "call"}, {"api_name": "librosa.core.resample", "line_number": 
51, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 51, "usage_type": "attribute"}, {"api_name": "librosa.feature.mfcc", "line_number": 59, "usage_type": "call"}, {"api_name": "librosa.feature", "line_number": 59, "usage_type": "attribute"}, {"api_name": "numpy.any", "line_number": 70, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 124, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 134, "usage_type": "call"}, {"api_name": "librosa.core.resample", "line_number": 135, "usage_type": "call"}, {"api_name": "librosa.core", "line_number": 135, "usage_type": "attribute"}, {"api_name": "multiprocessing.Pool", "line_number": 151, "usage_type": "call"}, {"api_name": "multiprocessing.cpu_count", "line_number": 152, "usage_type": "call"}, {"api_name": "accuracy.predict_class_all", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "173504248", "text": "#!/usr/bin/env python\nimport numpy as np\nimport matplotlib.mlab as mlab\nimport matplotlib.pyplot as plt\nimport sys\n\nx = []\nfop = open(sys.argv[1],'r')\nfor line in fop.readlines():\n x.append(float(line.split()[int(sys.argv[2])]))\nfop.close()\n\nn, bins, patches = plt.hist(x, bins=18, normed=1, facecolor='blue', alpha=0.75)\n\nplt.xlabel(r'$\\theta$')\nplt.ylabel('Probability')\nplt.title(r'$\\mathrm{Histogram\\ of\\ smth:}\\ \\mu=100,\\ \\sigma=15$')\n#plt.axis([40, 160, 0, 0.03])\nplt.grid(True)\nplt.show()\n#plt.savefig(\"ThetaDist.eps\")\n", "sub_path": "hist/histogram.py", "file_name": "histogram.py", "file_ext": "py", "file_size_in_byte": 527, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.argv", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 10, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "259988501", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('events', '0015_auto_20150423_1301'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='event',\n name='range',\n ),\n migrations.AlterField(\n model_name='eventdate',\n name='date',\n field=models.DateField(null=True, verbose_name='\\u0414\\u0430\\u0442\\u0430 
\\u043d\\u0430\\u0447\\u0430\\u043b\\u0430'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='eventdate',\n name='date_end',\n field=models.DateField(null=True, verbose_name='\\u0414\\u0430\\u0442\\u0430 \\u043e\\u043a\\u043e\\u043d\\u0447\\u0430\\u043d\\u0438\\u044f'),\n preserve_default=True,\n ),\n migrations.AlterField(\n model_name='eventdate',\n name='time_end',\n field=models.TimeField(null=True, verbose_name='\\u0412\\u0440\\u0435\\u043c\\u044f \\u043e\\u043a\\u043e\\u043d\\u0447\\u0430\\u043d\\u0438\\u044f'),\n preserve_default=True,\n ),\n ]\n", "sub_path": "apps/events/migrations/0016_auto_20150424_1653.py", "file_name": "0016_auto_20150424_1653.py", "file_ext": "py", "file_size_in_byte": 1188, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 24, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 24, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.TimeField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "653346540", "text": "import bs4\nimport urllib.request\nfrom symlist import unicode_to_latex\nimport os\nimport cv2\nimport numpy as np\n\ni=0\n\nfor key in unicode_to_latex:\n unicode_trimmed = key.decode(\"utf-8\")[2:]\n print(unicode_trimmed)\n if unicode_trimmed in os.listdir('/media/daniel/archives/Mac-Files/Misc/ocr_training/images'):\n continue\n\n try:\n url = 'http://www.fileformat.info/info/unicode/char/'+unicode_trimmed+'/index.htm'\n\n req = urllib.request.Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n content = urllib.request.urlopen(req).read()\n soup = bs4.BeautifulSoup(content, 'html.parser')\n\n img = soup.findAll(\"a\", {\"class\": \"thumbnail\"})[0]\n\n imgUrl = 'http://www.fileformat.info' + img.find('img')['src']\n req = urllib.request.Request(imgUrl, headers={'User-Agent': 'Mozilla/5.0'})\n\n imgPath = os.path.join('images', unicode_trimmed + '.png')\n\n resource = urllib.request.urlopen(req)\n\n nparr = np.fromstring(resource.read(), np.uint8)\n img = cv2.imdecode(nparr, cv2.IMREAD_UNCHANGED)\n\n img = img[:100,:100]\n cv2.imwrite(imgPath, img)\n\n except:\n print('failed for',unicode_trimmed)\n\n", "sub_path": "symbol_image.py", "file_name": "symbol_image.py", "file_ext": "py", "file_size_in_byte": 1195, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": 
[{"api_name": "symlist.unicode_to_latex", "line_number": 10, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 19, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 19, "usage_type": "name"}, {"api_name": "urllib.request.request.urlopen", "line_number": 20, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 20, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 20, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "urllib.request.request.Request", "line_number": 26, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 26, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 26, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlopen", "line_number": 30, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 30, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.fromstring", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 32, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.IMREAD_UNCHANGED", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "10336366", "text": "\"\"\"\n6 Qubit Recursion Script\n\nX SHAPE (4th on the list)\n\"\"\"\n\nimport State_Solver as SSV\nimport WSO6 as WSV\nfrom tqdm import tqdm\nimport numpy as np\n\n\nrho = np.load('6qb_random_pure.npy')\n\n\nbasis = ['seseee', 'sseeee', 'seesee', 'eseese', 'eseees']\n\nWitness = WSV.Witness_Optimizer(64, basis, rho)\n\n\nfor k in tqdm(range(5)):\n State = SSV.State_Solver(64, Witness)\n Witness = WSV.Witness_Optimizer(64, basis, State)\n expv = np.trace(np.dot(Witness, State))\n print(\"Finished step: \", k, \"out of 5\")\n print(expv)\nprint('DONE!')\n\n\nnp.save('state_6qb_x_shape.npy', State)\nnp.save('witness_6qb_x_shape.npy', Witness)\n", "sub_path": "run_script_2.py", "file_name": "run_script_2.py", "file_ext": "py", "file_size_in_byte": 625, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "WSO6.Witness_Optimizer", "line_number": 18, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 21, "usage_type": "call"}, {"api_name": "State_Solver.State_Solver", "line_number": 22, "usage_type": "call"}, {"api_name": "WSO6.Witness_Optimizer", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.trace", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "342468276", "text": "import pyglet\nimport messages\nfrom obj_def import *\n\n\nclass AIcontrolsState:\n Start, Run, Exit = range(3)\n\n\nclass AItype:\n Dummy, DumbAi = range(2)\n\n @classmethod\n def contruct_ai(cls, aitype, index, 
battle_field_size):\n if aitype == cls.DumbAi:\n return DumbAI(index, battle_field_size)\n elif aitype == cls.Dummy:\n return Dummy(index)\n\n\nclass AIcontrols:\n def __init__(self, configuration, messenger=None, train_mode=False):\n self.ai_state = AIcontrolsState.Start\n self.train_mode = train_mode\n self.messenger = messenger\n self.battle_field_size = np.array([0.0, 0.0])\n self.objects_copy = None\n self.configuration = None\n self.result = None\n self.ai_objs = []\n for index in range(0, ObjectType.ObjArrayTotal):\n self.ai_objs.append(Dummy(index))\n self.update_ai_settings(configuration)\n self.framerate = 30\n self.functions = {messages.AIcontrols.Quit: self.stop_ai,\n messages.AIcontrols.UpdateObjects: self.update_objects,\n messages.AIcontrols.Run: self.start_ai_controls,\n messages.AIcontrols.UpdateAiSettings: self.update_ai_settings}\n if not self.train_mode:\n pyglet.clock.schedule_interval(self.read_mes, 1.0 / self.framerate)\n pyglet.clock.schedule_interval(self.recalc, 1.0 / self.framerate)\n\n def read_mes(self, dt):\n if self.ai_state != AIcontrolsState.Exit:\n while True:\n data = self.messenger.get_message(messages.AIcontrols)\n if not data:\n break\n self.functions[data['func']](**data['args']) if 'args' in data else self.functions[data['func']]()\n\n def start_ai_controls(self):\n self.ai_state = AIcontrolsState.Run\n\n def stop_ai(self):\n self.ai_state = AIcontrolsState.Exit\n\n def update_objects(self, objects_copy):\n self.objects_copy = objects_copy\n\n def update_ai_settings(self, configuration):\n self.configuration = configuration\n if configuration:\n offset_counter = {}\n for key in configuration:\n for item in configuration[key]:\n if key not in offset_counter:\n offset_counter[key] = 0\n else:\n offset_counter[key] += 1\n\n if key == ObjectType.FieldSize:\n self.battle_field_size[0], self.battle_field_size[1] = item[0], item[1]\n if key in (ObjectType.Player1, ObjectType.Player2, ObjectType.Bot1, ObjectType.Bot2):\n if len(item) == 6:\n _, _, _, _, _, aitype = item\n off_counter = offset_counter[key]\n obj_offset, _ = ObjectType.offset(key)\n obj_ind = obj_offset + off_counter\n self.ai_objs[obj_ind] = AItype.contruct_ai(aitype, obj_ind, self.battle_field_size)\n\n def recalc(self, dt, objects_for_train=None):\n self.result = []\n if objects_for_train is not None:\n self.train_mode = True\n self.objects_copy = objects_for_train\n if self.ai_state == AIcontrolsState.Run and (self.objects_copy is not None or self.train_mode):\n for index in range(0, ObjectType.ObjArrayTotal):\n if self.objects_copy[index][ObjectProp.ObjType] == ObjectType.Absent:\n continue\n result = self.ai_objs[index].calc_behaviour(self.objects_copy)\n if result is None:\n continue\n turn_ctrl, vel_ctrl = result\n if self.train_mode:\n self.result.append([index, vel_ctrl, turn_ctrl])\n continue\n self.messenger.objects_set_control_signal(index, ObjectProp.VelControl, vel_ctrl)\n self.messenger.objects_set_control_signal(index, ObjectProp.TurnControl, turn_ctrl)\n if self.train_mode:\n return self.result\n\n\nclass Dummy:\n def __init__(self, index):\n self.index = index\n\n def calc_behaviour(self, objects_state):\n return None\n\n\nclass DumbAI(Dummy):\n def __init__(self, index, battle_field_size):\n super(DumbAI, self).__init__(index)\n self.battle_field_size = battle_field_size\n self.centre_coord = self.battle_field_size / 2\n self.r_attack = np.int32(100)\n self.crit_approx = np.int32(50)\n self.r_crit = np.int32(100)\n self.collision_crit = np.int32(100)\n 
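# Editor's note (added documentation, not in the original source): the\r\n        # thresholds above and below are battle-field distances in pixels --\r\n        # r_attack is the stand-off distance behind the enemy used as the pursuit\r\n        # aim point, r_crit gates the throttle, collision_crit triggers an\r\n        # evasive turn, and front_crit_collision is the range of the head-on\r\n        # avoidance check in calc_behaviour.\r\n        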
self.front_crit_collision = np.int32(350)\r\n        self.enemy_index = np.int32(0)\r\n        self.obj = np.zeros(ObjectProp.Total)\r\n        self.obj_coord = np.array([0.0,0.0])\r\n        self.obj_dir = np.float(0.0)\r\n        self.obj_dir_vec = np.array([0.0, 0.0])\r\n        self.obj_vel = np.float(0.0)\r\n        self.centre_dir = np.array([0.0, 0.0])\r\n        self.enemy_id = np.int32(0)\r\n        self.enemy = np.zeros(ObjectProp.Total)\r\n        self.enemy_coord = np.array([0.0,0.0])\r\n        self.enemy_dir = np.float(0.0)\r\n        self.enemy_dir_vec = np.array([0.0,0.0])\r\n        self.enemy_vel = np.float(0.0)\r\n        self.aim_coord = np.array([0.0, 0.0])\r\n        self.diff_coord = np.array([0.0, 0.0])\r\n        self.angle, self.rotation_side = np.float(0.0), np.float(0.0)\r\n        self.turn_mod = np.float(0.0)\r\n        self.turn_ctrl = np.float(0.0)\r\n        self.rotation_matrix = np.array([[0.0, 0.0], [0.0, 0.0]])\r\n        self.vec1, self.vec2 = np.array([0.0, 0.0]), np.array([0.0, 0.0])\r\n        self.vec2_rel = np.array([0.0, 0.0])\r\n        self.angle_min = np.float(0.0)\r\n        self.vel_ctrl = np.float(0.0)\r\n        self.rot_side = np.float(0.0)\r\n        self.own_team = Teams.team_by_id(self.index)\r\n        self.enemy_team = Teams.get_opposite_team(self.own_team)\r\n        self.friendly_ids = Teams.get_team_obj_ids(self.own_team)\r\n        self.enemy_ids = Teams.get_team_obj_ids(self.enemy_team)\r\n        self.nearest_enemy_id = None\r\n        self.nearest_object_id = None\r\n        self.nearest_object = np.zeros(ObjectProp.Total)\r\n        self.nearest_coord = np.array([0.0, 0.0])\r\n        self.nearest_dir_vec = np.array([0.0, 0.0])\r\n        self.nearest_dir = np.float(0.0)\r\n        self.obj_diff_coord = np.array([0.0, 0.0])\r\n        self.angle_objs = np.float(0.0)\r\n        self.rotation_side_objs = np.float(0.0)\r\n\r\n    def calc_nearest_dir(self, vec1, vec2):\r\n        self.vec1, self.vec2 = vec1 / np.linalg.norm(vec1), vec2 / np.linalg.norm(vec2)\r\n        # build the rotation matrix from the normalized vector so that the arccos\r\n        # below receives a unit-length component\r\n        self.rotation_matrix[0][0], self.rotation_matrix[0][1], self.rotation_matrix[1][0], self.rotation_matrix[1][1] = self.vec1[0], self.vec1[1], -self.vec1[1], self.vec1[0]\r\n        self.vec2_rel = np.matmul(self.rotation_matrix, self.vec2)\r\n        self.angle_min = np.degrees(np.abs(np.arccos(self.vec2_rel[0])))\r\n        self.rotation_side = np.float(1) if np.sign(self.vec2_rel[1]) >= 0 else np.float(-1)\r\n        return self.angle_min, self.rotation_side\r\n\r\n    def calc_nearest_obj_and_enemy(self, objects_state):\r\n        self.nearest_enemy_id = None\r\n        enemy_distance = None\r\n        for ind in self.enemy_ids:\r\n            if ind == self.index:\r\n                continue\r\n            if objects_state[ind][ObjectProp.ObjType] != ObjectType.Absent:\r\n                curr_dist = np.linalg.norm(np.array([objects_state[ind][ObjectProp.Xcoord] - self.obj_coord[0],\r\n                                                     objects_state[ind][ObjectProp.Ycoord] - self.obj_coord[1]]))\r\n                if enemy_distance is None or enemy_distance > curr_dist:\r\n                    enemy_distance = curr_dist\r\n                    self.nearest_enemy_id = ind\r\n        self.nearest_object_id = self.nearest_enemy_id\r\n        nearest_distance = enemy_distance\r\n        for ind in self.friendly_ids:\r\n            if ind == self.index:\r\n                continue\r\n            if objects_state[ind][ObjectProp.ObjType] != ObjectType.Absent:\r\n                curr_dist = np.linalg.norm(np.array([objects_state[ind][ObjectProp.Xcoord] - self.obj_coord[0],\r\n                                                     objects_state[ind][ObjectProp.Ycoord] - self.obj_coord[1]]))\r\n                if nearest_distance is None or nearest_distance > curr_dist:\r\n                    nearest_distance = curr_dist\r\n                    self.nearest_object_id = ind\r\n        return self.nearest_enemy_id, self.nearest_object_id\r\n\r\n    def calc_behaviour(self, objects_state):\r\n        self.obj = objects_state[self.index]\r\n        self.obj_coord[0], self.obj_coord[1] = self.obj[ObjectProp.Xcoord], self.obj[ObjectProp.Ycoord]\r\n        self.obj_dir = self.obj[ObjectProp.Dir]\r\n        self.obj_dir_vec[0], self.obj_dir_vec[1] = np.cos(np.radians(self.obj_dir)), np.sin(np.radians(self.obj_dir))\r\n        self.obj_vel = 
self.obj[ObjectProp.Velocity]\n self.nearest_enemy_id, self.nearest_object_id = self.calc_nearest_obj_and_enemy(objects_state)\n\n self.enemy = objects_state[self.nearest_enemy_id] if self.nearest_enemy_id is not None else None\n self.nearest_object = objects_state[self.nearest_object_id] if self.nearest_object_id is not None else None\n if self.battle_field_size[0] - self.crit_approx < self.obj_coord[0] or self.obj_coord[0] < self.crit_approx \\\n or self.battle_field_size[1] - self.crit_approx < self.obj_coord[1] or self.obj_coord[1] < self.crit_approx:\n self.centre_dir = self.centre_coord - self.obj_coord\n self.angle, self.rotation_side = self.calc_nearest_dir(self.obj_dir_vec, self.centre_dir)\n self.rot_side = (1 / 180 * self.angle) * self.rotation_side\n self.vel_ctrl = np.float(1) if self.angle <= np.float(90) else np.float(-1)\n return self.rot_side, self.vel_ctrl\n\n if self.nearest_object is not None:\n self.nearest_coord[0] = self.nearest_object[ObjectProp.Xcoord]\n self.nearest_coord[1] = self.nearest_object[ObjectProp.Ycoord]\n self.nearest_dir = self.nearest_object[ObjectProp.Dir]\n self.nearest_dir_vec[0], self.nearest_dir_vec[1] = np.cos(np.radians(self.nearest_dir)), np.sin(np.radians(self.nearest_dir))\n self.angle, self.rotation_side = self.calc_nearest_dir(self.nearest_dir_vec, self.obj_dir_vec)\n self.obj_diff_coord = self.obj_coord - self.enemy_coord\n self.angle_objs, self.rotation_side_objs = self.calc_nearest_dir(self.obj_dir_vec, self.obj_diff_coord)\n if self.angle > 175 and self.angle_objs > 175 and self.obj[ObjectProp.Velocity] > 130 and np.linalg.norm(self.nearest_coord - self.obj_coord) < self.front_crit_collision:\n self.turn_ctrl = 1\n self.vel_ctrl = -1\n return self.turn_ctrl, self.vel_ctrl\n\n if np.linalg.norm(self.nearest_coord - self.obj_coord) < self.collision_crit:\n self.angle, self.rotation_side = self.calc_nearest_dir(self.obj_dir_vec, self.nearest_coord - self.obj_coord)\n if self.angle < 90:\n self.turn_ctrl = -self.rotation_side\n self.vel_ctrl = -1\n return self.turn_ctrl, self.vel_ctrl\n\n if self.enemy is not None:\n self.enemy_coord[0], self.enemy_coord[1] = self.enemy[ObjectProp.Xcoord], self.enemy[ObjectProp.Ycoord]\n self.enemy_dir = self.enemy[ObjectProp.Dir]\n self.enemy_dir_vec[0], self.enemy_dir_vec[1] = np.cos(np.radians(self.enemy_dir)), np.sin(np.radians(self.enemy_dir))\n self.enemy_vel = self.enemy[ObjectProp.Velocity]\n self.angle, self.rotation_side = self.calc_nearest_dir(self.enemy_dir_vec, self.obj_dir_vec)\n self.obj_diff_coord = self.obj_coord - self.enemy_coord\n self.angle_objs, self.rotation_side_objs = self.calc_nearest_dir(self.obj_dir_vec, self.enemy_dir_vec)\n if self.angle > 175 and self.angle_objs > 175 and self.obj[ObjectProp.Velocity] > 130 and np.linalg.norm(self.enemy_coord - self.obj_coord) < self.front_crit_collision:\n self.turn_ctrl = 1\n self.vel_ctrl = -1\n return self.turn_ctrl, self.vel_ctrl\n\n self.aim_coord[0] = self.enemy_coord[0] - self.r_attack * np.cos(np.radians(self.enemy_dir))\n self.aim_coord[1] = self.enemy_coord[1] - self.r_attack * np.sin(np.radians(self.enemy_dir))\n self.diff_coord = self.aim_coord - self.obj_coord\n self.angle, self.rotation_side = self.calc_nearest_dir(self.obj_dir_vec, self.diff_coord)\n self.turn_mod = np.float(1) if self.angle > np.float(30) else np.float(1/30) * self.angle\n self.turn_ctrl = self.rotation_side * self.turn_mod\n\n self.vel_ctrl = np.float(1) if np.linalg.norm(self.diff_coord) > self.r_crit else np.float(0)\n return self.turn_ctrl, 
self.vel_ctrl\n\n\n\n", "sub_path": "ai_controls.py", "file_name": "ai_controls.py", "file_ext": "py", "file_size_in_byte": 12930, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "messages.AIcontrols", "line_number": 35, "usage_type": "attribute"}, {"api_name": "messages.AIcontrols", "line_number": 36, "usage_type": "attribute"}, {"api_name": "messages.AIcontrols", "line_number": 37, "usage_type": "attribute"}, {"api_name": "messages.AIcontrols", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pyglet.clock.schedule_interval", "line_number": 40, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 40, "usage_type": "attribute"}, {"api_name": "pyglet.clock.schedule_interval", "line_number": 41, "usage_type": "call"}, {"api_name": "pyglet.clock", "line_number": 41, "usage_type": "attribute"}, {"api_name": "messages.AIcontrols", "line_number": 46, "usage_type": "attribute"}]} +{"seq_id": "255428351", "text": "# app.py\n\nfrom flask import Flask\nfrom models import Base, User\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask import request\nfrom flask import render_template\nfrom yahoo_finance import Share\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'\ndb = SQLAlchemy(app)\n\n@app.route('/')\ndef root():\n db.session.add(User(request.remote_addr, 'your@ip.com'))\n db.session.commit()\n users = db.session.query(User).all()\n return u\"
\".join([u\"{0}: {1}\".format(user.name, user.email) for user in users])\n\n@app.route('/price')\ndef index():\n return 'GOOGLE Price'\n# example: http://localhost/GOOG\n@app.route('/price/')\ndef price_index(stock=None):\n if stock == None:\n stock == 'GOOG'\n\n stockshare = Share(stock).get_price()\n db.session.add(User(request.remote_addr, stockshare))\n db.session.commit()\n users = db.session.query(User).all()\n return render_template('market.html', users=users)\n\nif __name__ == '__main__':\n app.run()", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 12, "usage_type": "call"}, {"api_name": "models.User", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.remote_addr", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "models.User", "line_number": 18, "usage_type": "argument"}, {"api_name": "yahoo_finance.Share", "line_number": 30, "usage_type": "call"}, {"api_name": "models.User", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.request.remote_addr", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 31, "usage_type": "name"}, {"api_name": "models.User", "line_number": 33, "usage_type": "argument"}, {"api_name": "flask.render_template", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "4214525", "text": "\"\"\"\nAuthor - Rohan Raja (rohanraja9@gmail.com)\n\"\"\"\n\nimport numpy as np\nimport helpers\nimport cv2\nfrom helpers import *\n\nepsilon = 30\ndelta = 4\nystart_padding = 1\nyinc = 15\nWHITE_CLR = (255,255,255)\n\ndef findClosestCont(conts, point):\n\n minDist = 10000\n minCnt = None\n\n for cnt in conts:\n for p in cnt:\n # import pdb; pdb.set_trace()\n dist = helpers.getDistance(point, p)\n if dist < minDist:\n minDist = dist\n minCnt = cnt\n\n return minCnt\n\ndef find_ymax(conts):\n \n ys = []\n\n for cnt in conts:\n ys.append(cnt[:,1].max())\n\n y_max = np.min(ys)\n \n ys = []\n for cnt in conts:\n ys.append(cnt[:,1].min())\n\n y_min = np.min(ys)\n\n return y_min, y_max\n#\n# def getTicksText(image, corners, xticks):\n#\n# \"\"\"\n# For Rohit:\n# Complete this method to find y bounings\n# \"\"\"\n#\n# for yval in range(corners[1][1], corners[1][1] + 140):\n# blackdots = np.sum(image[yval, corners[0][0]:corners[2][0]] < 150)\n# print yval, blackdots\n# import pdb; pdb.set_trace()\n# print image\n#\n\ndef getTicksText(image, corners, xticks):\n\n img = image\n\n xticks = np.array(xticks)\n tick = xticks[1]\n mid_left = (xticks[0] + xticks[1])/2\n mid_right = (xticks[1] + xticks[2])/2\n\n ystart = tick[1]\n subcorners = [\n corners[1],\n (corners[1][0], img.shape[0]),\n (corners[2][0], img.shape[0]),\n corners[2],\n ]\n subimg = helpers.getSubImage(img, subcorners)\n\n ret,thresh = cv2.threshold(subimg,127,255,1)\n contours,h = cv2.findContours(thresh,1,2)\n conts = []\n for cnt in contours:\n approx = cv2.approxPolyDP(cnt,0.1*cv2.arcLength(cnt,True),True)\n approx = approx.reshape(approx.shape[0], approx.shape[2])\n approx = approx + corners[1]\n conts.append(approx)\n\n # cv2.drawContours(img,[approx],0,(0,255,255),2)\n\n\n cnt2 = findClosestCont(conts, xticks[4])\n\n ystart, yend = find_ymax(conts)\n ystart -= ystart_padding\n yend += 
ystart_padding\n\n out = []\n\n mid_dist = tick - mid_left\n y_up = np.array([0, ystart])\n y_down = np.array([0, yend])\n\n for tick in xticks:\n rect = []\n x_left = tick - mid_dist\n x_right = tick + mid_dist\n x_left[1] = 0\n x_right[1] = 0\n\n rect.append(x_left + y_up)\n rect.append(x_left + y_down)\n rect.append(x_right + y_down)\n rect.append(x_right + y_up)\n\n out.append(rect)\n\n\n # import pdb; pdb.set_trace()\n\n return out\n\ndef getTicksText2(image, corners, xticks):\n \"\"\"\n image - Image class instance from image.py\n corners - Array of 4 points of corners - \n Order of points in array is :\n\n Index Position\n 0 Top left\n 1 Bottom left\n 2 Bottom right\n 3 Top right\n\n\n xticks - array of tick coordinates\n\n output: array of rectangle surrounding tick text\n \"\"\"\n\n img = image\n\n xticks = np.array(xticks)\n tick = xticks[1]\n mid_left = (xticks[0] + xticks[1])/2\n mid_right = (xticks[1] + xticks[2])/2\n\n ystart = tick[1]\n\n while helpers.getDistance3(img[ystart][tick[0]], WHITE_CLR ) > epsilon:\n ystart += 1\n \n ystart += ystart_padding\n\n yend = ystart\n\n seenBlack = 0\n seenPureWhite = 0\n\n while seenBlack == 0 or seenPureWhite == 0 :\n\n colorAvg = img[yend][mid_left[0]:mid_right[0]].mean(0)\n dist = helpers.getDistance3(colorAvg, WHITE_CLR)\n\n if dist > delta:\n if seenBlack == 0:\n ystart = yend - yinc - ystart_padding\n seenBlack = 1\n elif seenBlack == 1:\n seenPureWhite = 1\n yend += yinc\n\n yend += ystart_padding\n\n out = []\n\n mid_dist = tick - mid_left\n y_up = np.array([0, ystart])\n y_down = np.array([0, yend])\n\n for tick in xticks:\n rect = []\n x_left = tick - mid_dist\n x_right = tick + mid_dist\n x_left[1] = 0\n x_right[1] = 0\n\n rect.append(x_left + y_up)\n rect.append(x_left + y_down)\n rect.append(x_right + y_down)\n rect.append(x_right + y_up)\n\n out.append(rect)\n\n\n # import pdb; pdb.set_trace()\n\n return out\n\n# def getYTicksText(image, corners, xticks):\n#\n#\n# for xval in range(corners[0][0], corners[0][0] - 220, -1):\n# blackdots = np.sum(image[corners[0][1]:corners[1][1], xval] < 150)\n# print xval, blackdots\n# import pdb; pdb.set_trace()\n# print image\n#\n\ndef getYTicksText(image, corners, yticks):\n\n img = image\n\n xticks = np.array(yticks)\n tick = xticks[1]\n mid_left = (xticks[0] + xticks[1])/2\n mid_right = (xticks[1] + xticks[2])/2\n\n ystart = tick[0]\n\n while helpers.getDistance3(img[tick[1]][ystart] , WHITE_CLR ) > epsilon:\n ystart -= 1\n \n ystart += ystart_padding\n\n yend = ystart\n\n seenBlack = 0\n seenPureWhite = 0\n\n while seenBlack == 0 or seenPureWhite == 0 :\n\n colorAvg = img[mid_left[1]:mid_right[1] , yend].mean(0)\n dist = helpers.getDistance3(colorAvg, WHITE_CLR)\n\n if dist > delta:\n if seenBlack == 0:\n # ystart = yend + yinc + ystart_padding\n seenBlack = 1\n elif seenBlack == 1:\n seenPureWhite = 1\n yend -= yinc\n\n yend -= ystart_padding\n\n out = []\n\n mid_dist = tick - mid_left\n y_up = np.array([ystart, 0])\n y_down = np.array([yend, 0])\n\n for tick in xticks:\n rect = []\n x_left = tick - mid_dist\n x_right = tick + mid_dist\n x_left[0] = 0\n x_right[0] = 0\n\n rect.append(x_left + y_down)\n rect.append(x_right + y_down)\n rect.append(x_right + y_up)\n rect.append(x_left + y_up)\n\n out.append(rect)\n\n\n\n return out\n\n # # image_transp = image.transpose((1,0,2))\n # yticks_inverted = []\n #\n # for ytk in yticks:\n # yticks_inverted.append(list(reversed(ytk)))\n #\n # return getTicksText(image_transp, corners, yticks_inverted)\n\n\n", "sub_path": "tickmethods_old.py", 
"file_name": "tickmethods_old.py", "file_ext": "py", "file_size_in_byte": 5916, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "helpers.getDistance", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "helpers.getSubImage", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 80, "usage_type": "call"}, {"api_name": "cv2.findContours", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.approxPolyDP", "line_number": 84, "usage_type": "call"}, {"api_name": "cv2.arcLength", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 143, "usage_type": "call"}, {"api_name": "helpers.getDistance3", "line_number": 150, "usage_type": "call"}, {"api_name": "helpers.getDistance3", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 178, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 179, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 214, "usage_type": "call"}, {"api_name": "helpers.getDistance3", "line_number": 221, "usage_type": "call"}, {"api_name": "helpers.getDistance3", "line_number": 234, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 249, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 250, "usage_type": "call"}]} +{"seq_id": "156461054", "text": "from apscheduler.schedulers.blocking import BlockingScheduler\nimport time\nimport datetime\nimport os\n\nsched = BlockingScheduler()\n\n\n@sched.scheduled_job('interval', hours=4)\ndef timed_job():\n print(\"Srarting Execution Again...\" + str(datetime.datetime.now()))\n f = open('ScheduleLog.txt', \"a\")\n if os.path.getsize(\"ScheduleLog.txt\") > 0:\n f.write(\"\\n\" + (str(datetime.datetime.now())) + \" Test Started \\n\")\n else:\n f.write((str(datetime.datetime.now())) + \" Test Started \\n \")\n\n os.system(\"mvn clean\")\n os.system(\"mvn test -Dtest=TestCaseCreate test\")\n f.close()\n\nos.system(\"mvn clean\")\nos.system(\"mvn test -Dtest=TestCaseCreate test\")\n\nsched.start()\n", "sub_path": "scheduler.py", "file_name": "scheduler.py", "file_ext": "py", "file_size_in_byte": 687, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "apscheduler.schedulers.blocking.BlockingScheduler", "line_number": 6, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "os.path.getsize", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 18, "usage_type": "call"}, {"api_name": "os.system", "line_number": 19, "usage_type": "call"}, 
{"api_name": "os.system", "line_number": 22, "usage_type": "call"}, {"api_name": "os.system", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "571690465", "text": "#!/usr/bin/env python\n\nimport setuptools\nimport novaagent\nimport sys\n\n\nrequirements = ['netifaces', 'pyxs', 'pycrypto', 'PyYaml', 'distro']\nif sys.version_info[:2] < (2, 7):\n requirements.append('argparse')\n\n\ntest_requirements = ['mock', 'nose']\nif sys.version_info[:2] < (2, 7):\n test_requirements.extend(['flake8 < 3', 'unittest2'])\nelse:\n test_requirements.append('flake8')\n\n\nsetuptools.setup(\n name='novaagent',\n version=novaagent.__version__,\n description=(\n 'Rackspace utility for reading xenstore and configuring'\n ' guest instance on Xen server'\n ),\n author='Brian Metzler',\n author_email='brian.metzler@rackspace.com',\n install_requires=requirements,\n extras_require={\n 'tests': test_requirements\n },\n entry_points={\n 'console_scripts': [\n 'nova-agent=novaagent.novaagent:main'\n ]\n },\n packages=[\n 'novaagent',\n 'novaagent.libs',\n 'novaagent.common',\n 'novaagent.xenstore'\n ],\n zip_safe=False\n)\n", "sub_path": "setup.py", "file_name": "setup.py", "file_ext": "py", "file_size_in_byte": 1029, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.version_info", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sys.version_info", "line_number": 14, "usage_type": "attribute"}, {"api_name": "setuptools.setup", "line_number": 20, "usage_type": "call"}, {"api_name": "novaagent.__version__", "line_number": 22, "usage_type": "attribute"}]} +{"seq_id": "229208206", "text": "#Boa:FramePanel:KeithleyIVPanel\n\n\n\n#-----------------------------------------------------------------------------\n# Name: KeithleyIVPanel.py\n# Purpose: A Front End for taking IV's with the Keithley\n#\n# Author: Aric Sanders\n#\n# Created: 2016/06/27\n# RCS-ID: $Id: KeithleyIVPanel.py $\n# Licence: MIT\n#-----------------------------------------------------------------------------\n\n\n\"\"\" KeithleyIVPanel is a GUI class for taking IV's with the Keithley piccoammeter\nHelp\n---------------\n`pyMez.Code.FrontEnds`\n
\nDocumentation Home |\nAPI Documentation Home |\nExamples Home |\nIndex\n
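\nExample\n-------\nA minimal usage sketch (an editor's illustration modeled on test_KeithleyIVPanel at the\nbottom of this module; untested):\n\n app = wx.App(False)\n frame = wx.Frame(None, size=wx.Size(900, 800))\n panel = KeithleyIVPanel(parent=frame, id=1, name='IV Panel',\n pos=wx.Point(0, 0), size=wx.Size(800, 600), style=wx.TAB_TRAVERSAL)\n frame.Show()\n app.MainLoop()\n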
\"\"\"\n\n#-------------------------------------------------------------------------------\n# Standard Imports\nimport wx\nimport sys\nimport os\n\n#-------------------------------------------------------------------------------\n#Thid Party Imports\nsys.path.append(os.path.join(os.path.dirname( __file__ ), '..','..'))\ntry:\n from Code.InstrumentControl.Experiments import *\nexcept:\n print('There was an error importing pyMez')\nIMAGE_DIRECTORY=os.path.join(os.path.dirname(os.path.realpath(__file__)),'img')\n\n[wxID_KEITHLEYIVPANEL, wxID_KEITHLEYIVPANELACTIONPANEL, \n wxID_KEITHLEYIVPANELAQUISTIONCONTROLPANEL, \n wxID_KEITHLEYIVPANELAQUISTIONLABELPANEL, \n wxID_KEITHLEYIVPANELAQUISTIONPARAMETERPANEL, \n wxID_KEITHLEYIVPANELBOWTIECONTROL, wxID_KEITHLEYIVPANELBOWTIELABEL, \n wxID_KEITHLEYIVPANELINFORMATIONPANEL, wxID_KEITHLEYIVPANELIVBUTTON, \n wxID_KEITHLEYIVPANELNOTESCONTROL, wxID_KEITHLEYIVPANELNOTESLABEL, \n wxID_KEITHLEYIVPANELNOTESPANEL, wxID_KEITHLEYIVPANELNUMBEROFPOINTSCONTROL, \n wxID_KEITHLEYIVPANELNUMBEROFPOINTSLABEL, wxID_KEITHLEYIVPANELPANEL1, \n wxID_KEITHLEYIVPANELPLOTBUTTON, wxID_KEITHLEYIVPANELRESISTANCECONTROL, \n wxID_KEITHLEYIVPANELRESISTANCELABEL, wxID_KEITHLEYIVPANELSAMPLENAMECONTROL, \n wxID_KEITHLEYIVPANELSAMPLENAMELABEL, wxID_KEITHLEYIVPANELSAVEBUTTON, \n wxID_KEITHLEYIVPANELSETTLETIMECONTROL, wxID_KEITHLEYIVPANELSETTLETIMELABEL, \n wxID_KEITHLEYIVPANELSTARTCONTROL, wxID_KEITHLEYIVPANELSTARTLABEL, \n wxID_KEITHLEYIVPANELSTOPCONTROL, wxID_KEITHLEYIVPANELSTOPLABEL, \n] = [wx.NewId() for _init_ctrls in range(27)]\n\nclass KeithleyIVPanel(wx.Panel):\n def _init_coll_AquistionLabelSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.StartLabel, 1, border=1,\n flag=wx.EXPAND | wx.ALIGN_CENTER)\n parent.AddWindow(self.StopLabel, 1, border=1,\n flag=wx.EXPAND | wx.ALIGN_CENTER)\n parent.AddWindow(self.NumberOfPointsLabel, 1, border=1,\n flag=wx.ALIGN_CENTER | wx.EXPAND)\n parent.AddWindow(self.SettleTimeLabel, 1, border=1,\n flag=wx.ALIGN_CENTER | wx.EXPAND)\n parent.AddWindow(self.BowtieLabel, 1, border=1,\n flag=wx.EXPAND | wx.ALIGN_CENTER)\n\n def _init_coll_AquistionControlSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.StartControl, 1, border=0, flag=wx.ALIGN_CENTER)\n parent.AddWindow(self.StopControl, 1, border=0, flag=wx.ALIGN_CENTER)\n parent.AddWindow(self.NumberOfPointsControl, 1, border=0,\n flag=wx.ALIGN_CENTER)\n parent.AddWindow(self.SettleTimeControl, 1, border=0,\n flag=wx.ALIGN_CENTER)\n parent.AddWindow(self.BowtieControl, 1, border=0, flag=wx.ALIGN_CENTER)\n\n def _init_coll_AquistionParameterSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.AquistionLabelPanel, 1, border=1,\n flag=wx.ALL | wx.EXPAND)\n parent.AddWindow(self.AquistionControlPanel, 1, border=1,\n flag=wx.ALL | wx.EXPAND)\n\n def _init_coll_PanelSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.InformationPanel, 2, border=2,\n flag=wx.ALL | wx.EXPAND)\n parent.AddWindow(self.AquistionParameterPanel, 1, border=2,\n flag=wx.ALL | wx.EXPAND)\n parent.AddWindow(self.ActionPanel, 1, border=2, flag=wx.ALL | wx.EXPAND)\n parent.AddWindow(self.NotesPanel, 4, border=2, flag=wx.ALL | wx.EXPAND)\n\n def _init_coll_ActionSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.IVButton, 1, border=5, flag=wx.EXPAND | wx.ALL)\n parent.AddWindow(self.SaveButton, 1, border=5, flag=wx.ALL | wx.EXPAND)\n parent.AddWindow(self.PlotButton, 
1, border=5, flag=wx.ALL | wx.EXPAND)\n\n def _init_coll_NotesSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.NotesLabel, 0, border=0, flag=0)\n parent.AddWindow(self.NotesControl, 1, border=2,\n flag=wx.ALL | wx.EXPAND)\n\n def _init_coll_InformationSizer_Items(self, parent):\n # generated method, don't edit\n\n parent.AddWindow(self.SampleNameLabel, 0, border=0, flag=0)\n parent.AddWindow(self.SampleNameControl, 0, border=0, flag=0)\n parent.AddWindow(self.panel1, 0, border=0, flag=0)\n parent.AddWindow(self.ResistanceLabel, 0, border=0, flag=0)\n parent.AddWindow(self.ResistanceControl, 0, border=0, flag=0)\n\n def _init_sizers(self):\n # generated method, don't edit\n self.PanelSizer = wx.BoxSizer(orient=wx.VERTICAL)\n\n self.InformationSizer = wx.BoxSizer(orient=wx.HORIZONTAL)\n\n self.AquistionParameterSizer = wx.BoxSizer(orient=wx.VERTICAL)\n\n self.ActionSizer = wx.BoxSizer(orient=wx.HORIZONTAL)\n\n self.NotesSizer = wx.BoxSizer(orient=wx.VERTICAL)\n\n self.AquistionLabelSizer = wx.BoxSizer(orient=wx.HORIZONTAL)\n\n self.AquistionControlSizer = wx.BoxSizer(orient=wx.HORIZONTAL)\n\n self._init_coll_PanelSizer_Items(self.PanelSizer)\n self._init_coll_InformationSizer_Items(self.InformationSizer)\n self._init_coll_AquistionParameterSizer_Items(self.AquistionParameterSizer)\n self._init_coll_ActionSizer_Items(self.ActionSizer)\n self._init_coll_NotesSizer_Items(self.NotesSizer)\n self._init_coll_AquistionLabelSizer_Items(self.AquistionLabelSizer)\n self._init_coll_AquistionControlSizer_Items(self.AquistionControlSizer)\n\n self.SetSizer(self.PanelSizer)\n self.InformationPanel.SetSizer(self.InformationSizer)\n self.NotesPanel.SetSizer(self.NotesSizer)\n self.AquistionControlPanel.SetSizer(self.AquistionControlSizer)\n self.AquistionLabelPanel.SetSizer(self.AquistionLabelSizer)\n self.ActionPanel.SetSizer(self.ActionSizer)\n self.AquistionParameterPanel.SetSizer(self.AquistionParameterSizer)\n\n def _init_ctrls(self, prnt):\n # generated method, don't edit\n wx.Panel.__init__(self, id=wxID_KEITHLEYIVPANEL,\n name='KeithleyIVPanel', parent=prnt, pos=wx.Point(341, 397),\n size=wx.Size(768, 643), style=wx.TAB_TRAVERSAL)\n self.SetClientSize(wx.Size(760, 609))\n self.SetBackgroundColour(wx.Colour(192, 192, 192))\n\n self.InformationPanel = wx.Panel(id=wxID_KEITHLEYIVPANELINFORMATIONPANEL,\n name='InformationPanel', parent=self, pos=wx.Point(2, 2),\n size=wx.Size(756, 148), style=wx.TAB_TRAVERSAL)\n\n self.AquistionParameterPanel = wx.Panel(id=wxID_KEITHLEYIVPANELAQUISTIONPARAMETERPANEL,\n name='AquistionParameterPanel', parent=self, pos=wx.Point(2,\n 154), size=wx.Size(756, 72), style=wx.TAB_TRAVERSAL)\n\n self.ActionPanel = wx.Panel(id=wxID_KEITHLEYIVPANELACTIONPANEL,\n name='ActionPanel', parent=self, pos=wx.Point(2, 230),\n size=wx.Size(756, 72), style=wx.TAB_TRAVERSAL)\n self.ActionPanel.SetBackgroundColour(wx.Colour(143, 133, 69))\n\n self.NotesPanel = wx.Panel(id=wxID_KEITHLEYIVPANELNOTESPANEL,\n name='NotesPanel', parent=self, pos=wx.Point(2, 306),\n size=wx.Size(756, 300), style=wx.TAB_TRAVERSAL)\n\n self.NotesLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELNOTESLABEL,\n label='Notes:', name='NotesLabel', parent=self.NotesPanel,\n pos=wx.Point(0, 0), size=wx.Size(35, 13), style=0)\n self.NotesLabel.SetFont(wx.Font(8, wx.SWISS, wx.NORMAL, wx.BOLD, False,\n 'MS Shell Dlg 2'))\n\n self.NotesControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELNOTESCONTROL,\n name='NotesControl', parent=self.NotesPanel, pos=wx.Point(2, 15),\n size=wx.Size(752, 283), 
style=wx.VSCROLL | wx.TE_MULTILINE,\n value='')\n\n self.IVButton = wx.Button(id=wxID_KEITHLEYIVPANELIVBUTTON,\n label='Take IV', name='IVButton', parent=self.ActionPanel,\n pos=wx.Point(5, 5), size=wx.Size(242, 62), style=0)\n self.IVButton.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD, False,\n 'MS Shell Dlg 2'))\n self.IVButton.Bind(wx.EVT_BUTTON, self.OnIVButtonButton,\n id=wxID_KEITHLEYIVPANELIVBUTTON)\n\n self.SaveButton = wx.Button(id=wxID_KEITHLEYIVPANELSAVEBUTTON,\n label='Save Data', name='SaveButton', parent=self.ActionPanel,\n pos=wx.Point(257, 5), size=wx.Size(242, 62), style=0)\n self.SaveButton.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD, False,\n 'MS Shell Dlg 2'))\n self.SaveButton.Bind(wx.EVT_BUTTON, self.OnSaveButtonButton,\n id=wxID_KEITHLEYIVPANELSAVEBUTTON)\n\n self.PlotButton = wx.Button(id=wxID_KEITHLEYIVPANELPLOTBUTTON,\n label='Plot Data', name='PlotButton', parent=self.ActionPanel,\n pos=wx.Point(509, 5), size=wx.Size(242, 62), style=0)\n self.PlotButton.SetFont(wx.Font(18, wx.SWISS, wx.NORMAL, wx.BOLD, False,\n 'MS Shell Dlg 2'))\n self.PlotButton.Bind(wx.EVT_BUTTON, self.OnPlotButtonButton,\n id=wxID_KEITHLEYIVPANELPLOTBUTTON)\n\n self.SampleNameLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELSAMPLENAMELABEL,\n label='Sample Name:', name='SampleNameLabel',\n parent=self.InformationPanel, pos=wx.Point(0, 0),\n size=wx.Size(104, 19), style=0)\n self.SampleNameLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.SampleNameControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELSAMPLENAMECONTROL,\n name='SampleNameControl', parent=self.InformationPanel,\n pos=wx.Point(104, 0), size=wx.Size(344, 21), style=0, value='')\n\n self.AquistionLabelPanel = wx.Panel(id=wxID_KEITHLEYIVPANELAQUISTIONLABELPANEL,\n name='AquistionLabelPanel', parent=self.AquistionParameterPanel,\n pos=wx.Point(1, 1), size=wx.Size(754, 34),\n style=wx.TAB_TRAVERSAL)\n\n self.AquistionControlPanel = wx.Panel(id=wxID_KEITHLEYIVPANELAQUISTIONCONTROLPANEL,\n name='AquistionControlPanel',\n parent=self.AquistionParameterPanel, pos=wx.Point(1, 37),\n size=wx.Size(754, 34), style=wx.TAB_TRAVERSAL)\n self.AquistionControlPanel.SetBackgroundColour(wx.Colour(192, 192, 192))\n\n self.StartLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELSTARTLABEL,\n label='Start', name='StartLabel',\n parent=self.AquistionLabelPanel, pos=wx.Point(0, 0),\n size=wx.Size(150, 34), style=wx.SIMPLE_BORDER | wx.ALIGN_CENTRE)\n self.StartLabel.SetBackgroundColour(wx.Colour(192, 192, 192))\n self.StartLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.StopLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELSTOPLABEL,\n label='Stop', name='StopLabel', parent=self.AquistionLabelPanel,\n pos=wx.Point(150, 0), size=wx.Size(150, 34),\n style=wx.SIMPLE_BORDER | wx.ALIGN_CENTRE)\n self.StopLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.NumberOfPointsLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELNUMBEROFPOINTSLABEL,\n label='Number of Points', name='NumberOfPointsLabel',\n parent=self.AquistionLabelPanel, pos=wx.Point(300, 0),\n size=wx.Size(150, 34), style=wx.ALIGN_CENTRE | wx.SIMPLE_BORDER)\n self.NumberOfPointsLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL,\n wx.NORMAL, False, 'MS Shell Dlg 2'))\n self.NumberOfPointsLabel.SetBackgroundColour(wx.Colour(192, 192, 192))\n\n self.SettleTimeLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELSETTLETIMELABEL,\n label='Settle Time', name='SettleTimeLabel',\n 
parent=self.AquistionLabelPanel, pos=wx.Point(450, 0),\n size=wx.Size(150, 34), style=wx.ALIGN_CENTRE | wx.SIMPLE_BORDER)\n self.SettleTimeLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.BowtieLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELBOWTIELABEL,\n label='Bowtie Sweep', name='BowtieLabel',\n parent=self.AquistionLabelPanel, pos=wx.Point(600, 0),\n size=wx.Size(150, 34), style=wx.ALIGN_CENTRE | wx.SIMPLE_BORDER)\n self.BowtieLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.StartControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELSTARTCONTROL,\n name='StartControl', parent=self.AquistionControlPanel,\n pos=wx.Point(0, 6), size=wx.Size(150, 21), style=wx.CAPTION,\n value='-.1')\n\n self.StopControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELSTOPCONTROL,\n name='StopControl', parent=self.AquistionControlPanel,\n pos=wx.Point(150, 6), size=wx.Size(150, 21), style=0,\n value='.1')\n\n self.NumberOfPointsControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELNUMBEROFPOINTSCONTROL,\n name='NumberOfPointsControl', parent=self.AquistionControlPanel,\n pos=wx.Point(300, 6), size=wx.Size(150, 21), style=0,\n value='10')\n\n self.SettleTimeControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELSETTLETIMECONTROL,\n name='SettleTimeControl', parent=self.AquistionControlPanel,\n pos=wx.Point(450, 6), size=wx.Size(150, 21), style=0,\n value='.2')\n\n self.BowtieControl = wx.CheckBox(id=wxID_KEITHLEYIVPANELBOWTIECONTROL,\n label='Bowtie', name='BowtieControl',\n parent=self.AquistionControlPanel, pos=wx.Point(600, 10),\n size=wx.Size(150, 13), style=0)\n self.BowtieControl.SetValue(False)\n\n self.ResistanceLabel = wx.StaticText(id=wxID_KEITHLEYIVPANELRESISTANCELABEL,\n label='Resistance', name='ResistanceLabel',\n parent=self.InformationPanel, pos=wx.Point(544, 0),\n size=wx.Size(73, 19), style=0)\n self.ResistanceLabel.Enable(True)\n self.ResistanceLabel.SetFont(wx.Font(12, wx.SWISS, wx.NORMAL, wx.NORMAL,\n False, 'MS Shell Dlg 2'))\n\n self.ResistanceControl = wx.TextCtrl(id=wxID_KEITHLEYIVPANELRESISTANCECONTROL,\n name='ResistanceControl', parent=self.InformationPanel,\n pos=wx.Point(617, 0), size=wx.Size(100, 21), style=0, value='')\n\n self.panel1 = wx.Panel(id=wxID_KEITHLEYIVPANELPANEL1, name='panel1',\n parent=self.InformationPanel, pos=wx.Point(448, 0),\n size=wx.Size(96, 24), style=wx.TAB_TRAVERSAL)\n\n self._init_sizers()\n\n def __init__(self, parent, id, pos, size, style, name):\n self._init_ctrls(parent)\n self.experiment=KeithleyIV()\n\n def OnIVButtonButton(self, event):\n try:\n [start,stop,number_points,settling_time,bowtie]=[float(self.StartControl.GetValue()),\n float(self.StopControl.GetValue()),int(self.NumberOfPointsControl.GetValue()),\n float(self.SettleTimeControl.GetValue()),self.BowtieControl.GetValue()]\n #print(start,stop,number_points,settle_time,bowtie)\n voltage_list=self.experiment.make_voltage_list(start,stop,number_points,bowtie)\n #print(voltage_list)\n try:\n self.experiment.initialize_keithley()\n if self.experiment.instrument.fake_mode:\n raise\n self.experiment.take_IV(voltage_list,settle_time=settling_time)\n except:\n text='Entering fake mode, keithley did not respond fake R=12000.1'\n print(text)\n self.NotesControl.SetValue(text)\n self.NotesControl.SetBackgroundColour(wx.Colour(192, 0, 0))\n fake_list=voltage_list\n for index,voltage in enumerate(fake_list):\n current=voltage/12000.1\n self.experiment.data_list.append({'Index':index,'Voltage':voltage,'Current':current}) \n 
self.experiment.calculate_resistance()\n self.ResistanceControl.SetValue(str(self.experiment.resistance))\n dlg = wx.MessageDialog(self, 'IV is Done!', 'IV Finished', wx.OK | wx.ICON_INFORMATION)\n try:\n result = dlg.ShowModal()\n finally:\n dlg.Destroy()\n \n except:\n raise\n print(\"IV Button Failure\")\n \n \n event.Skip()\n\n def OnSaveButtonButton(self, event):\n self.experiment.notes=self.NotesControl.GetValue()\n self.experiment.name=self.SampleNameControl.GetValue()\n self.experiment.save_data()\n event.Skip()\n\n def OnPlotButtonButton(self, event):\n self.experiment.plot_data()\n event.Skip()\n\n\n\ndef test_KeithleyIVPanel():\n app = wx.PySimpleApp()\n frame = wx.Frame(None,size=wx.Size(900, 800))\n panel=KeithleyIVPanel(id=1, name='IV Panel',\n parent=frame, pos=wx.Point(350, 204), size=wx.Size(200, 800),\n style=wx.TAB_TRAVERSAL)\n sizer=wx.BoxSizer()\n sizer.Add(panel,1,wx.EXPAND,2)\n frame.SetSizerAndFit(sizer)\n frame.SetSize(wx.Size(800, 600))\n frame.Show()\n\n app.MainLoop() \n \n \n \nif __name__ == '__main__':\n test_KeithleyIVPanel()\n", "sub_path": "Code/FrontEnds/KeithleyIVPanel.py", "file_name": "KeithleyIVPanel.py", "file_ext": "py", "file_size_in_byte": 18249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 36, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 41, "usage_type": "call"}, {"api_name": "wx.NewId", "line_number": 58, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 60, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 65, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 67, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 69, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 69, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 71, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 71, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 73, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 73, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 78, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 79, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 81, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 83, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTER", "line_number": 84, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 90, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 90, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 92, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 92, "usage_type": "attribute"}, 
{"api_name": "wx.ALL", "line_number": 98, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 98, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 100, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 100, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 101, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 101, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 102, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 102, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 107, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 107, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 108, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 108, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 109, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 109, "usage_type": "attribute"}, {"api_name": "wx.ALL", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wx.EXPAND", "line_number": 116, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 129, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 129, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 131, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 131, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 133, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 133, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 135, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 135, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 137, "usage_type": "call"}, {"api_name": "wx.VERTICAL", "line_number": 137, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 139, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 139, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 141, "usage_type": "call"}, {"api_name": "wx.HORIZONTAL", "line_number": 141, "usage_type": "attribute"}, {"api_name": "wx.Panel.__init__", "line_number": 161, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 161, "usage_type": "attribute"}, {"api_name": "wx.Point", "line_number": 162, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 163, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 163, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 164, "usage_type": "call"}, {"api_name": "wx.Colour", "line_number": 165, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 167, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 168, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 169, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 169, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 171, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 172, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 173, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 173, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 175, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 176, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 177, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 177, "usage_type": "attribute"}, 
{"api_name": "wx.Colour", "line_number": 178, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 180, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 181, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 182, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 182, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 184, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 186, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 186, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 187, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 187, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 187, "usage_type": "attribute"}, {"api_name": "wx.BOLD", "line_number": 187, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 190, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 191, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 192, "usage_type": "call"}, {"api_name": "wx.VSCROLL", "line_number": 192, "usage_type": "attribute"}, {"api_name": "wx.TE_MULTILINE", "line_number": 192, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 195, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 197, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 197, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 198, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 198, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 198, "usage_type": "attribute"}, {"api_name": "wx.BOLD", "line_number": 198, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 200, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 203, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 205, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 205, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 206, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.BOLD", "line_number": 206, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 208, "usage_type": "attribute"}, {"api_name": "wx.Button", "line_number": 211, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 213, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 213, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 214, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 214, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 214, "usage_type": "attribute"}, {"api_name": "wx.BOLD", "line_number": 214, "usage_type": "attribute"}, {"api_name": "wx.EVT_BUTTON", "line_number": 216, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 219, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 221, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 222, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 223, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 223, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 223, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 226, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 228, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 228, "usage_type": "call"}, {"api_name": 
"wx.Panel", "line_number": 230, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 232, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 232, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 233, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 235, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 237, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 238, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 238, "usage_type": "attribute"}, {"api_name": "wx.Colour", "line_number": 239, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 241, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 243, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 244, "usage_type": "call"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 244, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTRE", "line_number": 244, "usage_type": "attribute"}, {"api_name": "wx.Colour", "line_number": 245, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 246, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 246, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 246, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 249, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 251, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 251, "usage_type": "call"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 252, "usage_type": "attribute"}, {"api_name": "wx.ALIGN_CENTRE", "line_number": 252, "usage_type": "attribute"}, {"api_name": "wx.Font", "line_number": 253, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 253, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 253, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 256, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 258, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 259, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTRE", "line_number": 259, "usage_type": "attribute"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 259, "usage_type": "attribute"}, {"api_name": "wx.Font", "line_number": 260, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 260, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 260, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 261, "usage_type": "attribute"}, {"api_name": "wx.Colour", "line_number": 262, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 264, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 266, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 267, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTRE", "line_number": 267, "usage_type": "attribute"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 267, "usage_type": "attribute"}, {"api_name": "wx.Font", "line_number": 268, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 268, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 268, "usage_type": "attribute"}, {"api_name": "wx.StaticText", "line_number": 271, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 273, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 274, "usage_type": "call"}, {"api_name": "wx.ALIGN_CENTRE", "line_number": 274, "usage_type": "attribute"}, {"api_name": "wx.SIMPLE_BORDER", "line_number": 274, "usage_type": "attribute"}, {"api_name": 
"wx.Font", "line_number": 275, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 275, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 275, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 278, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 280, "usage_type": "call"}, {"api_name": "wx.CAPTION", "line_number": 280, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 283, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 285, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 285, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 288, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 290, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 290, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 293, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 295, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 295, "usage_type": "call"}, {"api_name": "wx.CheckBox", "line_number": 298, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 300, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 301, "usage_type": "call"}, {"api_name": "wx.StaticText", "line_number": 304, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 306, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 307, "usage_type": "call"}, {"api_name": "wx.Font", "line_number": 309, "usage_type": "call"}, {"api_name": "wx.SWISS", "line_number": 309, "usage_type": "attribute"}, {"api_name": "wx.NORMAL", "line_number": 309, "usage_type": "attribute"}, {"api_name": "wx.TextCtrl", "line_number": 312, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 314, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 314, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 316, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 317, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 318, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 318, "usage_type": "attribute"}, {"api_name": "wx.Colour", "line_number": 343, "usage_type": "call"}, {"api_name": "wx.MessageDialog", "line_number": 350, "usage_type": "call"}, {"api_name": "wx.OK", "line_number": 350, "usage_type": "attribute"}, {"api_name": "wx.ICON_INFORMATION", "line_number": 350, "usage_type": "attribute"}, {"api_name": "wx.PySimpleApp", "line_number": 376, "usage_type": "call"}, {"api_name": "wx.Frame", "line_number": 377, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 377, "usage_type": "call"}, {"api_name": "wx.Point", "line_number": 379, "usage_type": "call"}, {"api_name": "wx.Size", "line_number": 379, "usage_type": "call"}, {"api_name": "wx.TAB_TRAVERSAL", "line_number": 380, "usage_type": "attribute"}, {"api_name": "wx.BoxSizer", "line_number": 381, "usage_type": "call"}, {"api_name": "wx.EXPAND", "line_number": 382, "usage_type": "attribute"}, {"api_name": "wx.Size", "line_number": 384, "usage_type": "call"}]} +{"seq_id": "33866242", "text": "import torch\nimport torch.nn as nn\nimport argparse\nimport torch.optim as optim\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport os\nfrom Project2_data_myself import get_train_test_set\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport cv2\n\n# 1. 
Define the convolutional neural network\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n # Conv parameters: in_channel, out_channel, kernel_size, stride, padding\n # block 1\n self.conv1_1 = nn.Conv2d(1, 8, 5, 2, 0)\n # block 2\n self.conv2_1 = nn.Conv2d(8, 16, 3, 1, 0)\n self.conv2_2 = nn.Conv2d(16, 16, 3, 1, 0)\n # block 3\n self.conv3_1 = nn.Conv2d(16, 24, 3, 1, 0)\n self.conv3_2 = nn.Conv2d(24, 24, 3, 1, 0)\n # block 4\n self.conv4_1 = nn.Conv2d(24, 40, 3, 1, 1)\n self.conv4_2 = nn.Conv2d(40, 80, 3, 1, 1)\n # points branch\n self.ip1 = nn.Linear(4 * 4 * 80, 128)\n self.ip2 = nn.Linear(128, 128)\n self.ip3 = nn.Linear(128, 42) # landmarks\n\n # commonly used\n self.prelu1_1 = nn.PReLU()\n self.prelu2_1 = nn.PReLU()\n self.prelu2_2 = nn.PReLU()\n self.prelu3_1 = nn.PReLU()\n self.prelu3_2 = nn.PReLU()\n self.prelu4_1 = nn.PReLU()\n self.prelu4_2 = nn.PReLU()\n self.preluip1 = nn.PReLU()\n self.preluip2 = nn.PReLU()\n self.ave_pool = nn.AvgPool2d(2, 2, ceil_mode=True) # pool1,pool2,pool3\n\n def forward(self, x): # x is input\n # block 1\n x = self.conv1_1(x)\n x = self.prelu1_1(x)\n x = self.ave_pool(x)\n # block 2\n x = self.conv2_1(x)\n x = self.prelu2_1(x)\n x = self.conv2_2(x)\n x = self.prelu2_2(x)\n x = self.ave_pool(x)\n # block 3\n x = self.conv3_1(x)\n x = self.prelu3_1(x)\n x = self.conv3_2(x)\n x = self.prelu3_2(x)\n x = self.ave_pool(x)\n # block 4\n x = self.conv4_1(x)\n x = self.prelu4_1(x)\n x = self.conv4_2(x)\n x = self.prelu4_2(x)\n # points branch\n ip1 = x.view(-1, 4 * 4 * 80) # flatten\n ip1 = self.preluip1(self.ip1(ip1))\n ip2 = self.preluip2(self.ip2(ip1))\n ip3 = self.ip3(ip2)\n\n return ip3\n\n
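# NOTE: a minimal shape-check sketch added for illustration (not in the original file). It\n# assumes a 1x112x112 grayscale input, which is what the 4 * 4 * 80 flatten in ip1 implies:\n# 112 -> 54 -> 27 -> 25 -> 23 -> 12 -> 10 -> 8 -> 4 through the convs and ceil-mode pools.\ndef _net_shape_check():\n net = Net()\n out = net(torch.randn(1, 1, 112, 112)) # one fake grayscale face crop\n assert out.shape == (1, 42) # 21 (x, y) landmark pairs\n\n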
# 2. Build the training framework\ndef main_test():\n # 1. Set up the parameters\n parser = argparse.ArgumentParser(description='Detector')\n parser.add_argument('--train-batch-size', type=int, default=16, metavar='N',\n help='input batch size for training (default=16)')\n parser.add_argument('--test-batch-size', type=int, default=64, metavar='N',\n help='input batch size for testing (default=64)')\n parser.add_argument('--epochs', type=int, default=1000, metavar='N',\n help='number of epochs to train (default=1000)')\n parser.add_argument('--lr', type=float, default=0.00001, metavar='LR',\n help='learning rate (default=0.00001)')\n parser.add_argument('--momentum', type=float, default=0.9, metavar='M',\n help='SGD momentum (default=0.9)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=100, metavar='S',\n help='random seed (default=100)')\n parser.add_argument('--log-interval', type=int, default=80, metavar='N',\n help='how many batches to wait before logging training status')\n parser.add_argument('--save-model', action='store_true', default=True,\n help='save the current Model')\n parser.add_argument('--save-directory', type=str, default='trained_models',\n help='learnt models are saved here')\n parser.add_argument('--phase', type=str, default='Train',\n # Train/train, Test/test, Predict/predict, Finetune/finetune\n help='training, test, predicting or finetuning')\n parser.add_argument('--epoch-id', type=int, default=399, metavar='N',\n help='id of the testing model')\n parser.add_argument('--checkpoint', type=str,\n default='trained_models\\\\detector_epoch_299.pt',\n help='run the training from specified checkpoint')\n parser.add_argument('--retrain', action='store_true', default=True,\n help='start training at checkpoint')\n\n args = parser.parse_args()\n\n # 2. Program control code\n # 2.1 Use the same random seed so results are reproducible\n torch.manual_seed(args.seed)\n # 2.2 Set up the GPU\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n device = torch.device('cuda' if use_cuda else 'cpu')\n # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n # 2.3 Load the data\n print('==> Loading Datasets')\n train_set, test_set = get_train_test_set()\n train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.train_batch_size, shuffle=True)\n valid_loader = torch.utils.data.DataLoader(test_set, batch_size=args.test_batch_size)\n # 2.4 Move the data/network to the CPU/GPU\n print('===> Building Model')\n net = Net()\n model = net.to(device)\n if args.phase == 'Test' or args.phase == 'test' or \\\n args.phase == 'Predict' or args.phase == 'predict' or \\\n args.phase == 'Finetune' or args.phase == 'finetune':\n model_name = os.path.join(args.save_directory,\n 'detector_epoch' + '_' + str(args.epoch_id) + '.pt')\n model.load_state_dict(torch.load(model_name))\n\n # 3. Define the loss function and optimizer\n # 3.1 Mean squared error loss\n criterion = nn.MSELoss()\n # 3.2 SGD+Momentum\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n # optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9,0.99))\n\n # 4. Define the phase the program is in\n # 4.1 train\n if args.phase == 'Train' or args.phase == 'train':\n print('===> Start Training')\n if args.retrain:\n model.load_state_dict(torch.load(args.checkpoint))\n print(\"Training from checkpoint %s\" % args.checkpoint)\n train_losses,valid_losses = train(args, train_loader, valid_loader, model, criterion, optimizer, device)\n print('====================================================')\n loss_show(train_losses, valid_losses,args)\n\n # 4.2 test\n elif args.phase == 'Test' or args.phase == 'test':\n print('===> Start Testing')\n with torch.no_grad():\n result, valid_mean_pts_loss = test(valid_loader, model, criterion, device)\n print(valid_mean_pts_loss)\n # Randomly plot images with the predicted keypoints against the ground truth\n result_show(result)\n\n # 4.3 finetune\n elif args.phase == 'Finetune' or args.phase == 'finetune':\n print('===> Finetune')\n train_losses, valid_losses = Finetune(args, train_loader, valid_loader, model, criterion, device)\n print(\"Learning Rate:\", args.lr, \"Epoch:\", args.epochs, \"Seed:\",\n args.seed, \"Batch_Size:\", args.train_batch_size)\n loss_show(train_losses, valid_losses, args)\n print('====================================================')\n\n # 4.4 predict\n elif args.phase == 'Predict' or args.phase == 'predict':\n print('===> Predict')\n predict(model, valid_loader,device)\n\n
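# NOTE (illustration, not in the original): with the argparse flags defined in main_test()\n# above, typical invocations would be (script name hypothetical):\n# python detector.py --phase train\n# python detector.py --phase test --epoch-id 399\n# python detector.py --phase finetune --epoch-id 299\n\n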
# 3. Training phase\n# 3.1 Training function\ndef train(args,train_loader,valid_loader,model,criterion,optimizer,device):\n # Set up saving\n global loss\n if args.save_model:\n if not os.path.exists(args.save_directory):\n os.makedirs(args.save_directory)\n # Set the number of epochs and the loss function\n epoch = args.epochs\n criterion = criterion\n # monitor training loss\n train_losses = []\n valid_losses = []\n log_path = os.path.join(args.save_directory, 'log_info.txt')\n if (os.path.exists(log_path)):\n os.remove(log_path)\n # Train\n for epoch_id in range(epoch):\n # training the model\n model.train()\n log_lines = []\n if epoch_id > 0:\n optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)\n if epoch_id > 300:\n optimizer = optim.SGD(model.parameters(), lr=0.00001, momentum=0.5)\n\n for batch_idx,batch in enumerate(train_loader):\n img = batch['image']\n input_img = img.to(device)\n # ground truth\n landmarks = batch['landmarks']\n target_pts = landmarks.to(device)\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # get output\n output_pts = model(input_img)\n # get loss\n loss = criterion(output_pts,target_pts)\n # do BP automatically\n loss.backward()\n optimizer.step()\n # show log info\n if batch_idx % args.log_interval == 0:\n log_line = 'Train Epoch:{}[{}/{}({:.0f}%)]\\t pts_loss:{:.6f}'.format(\n epoch_id,\n batch_idx * len(img), # batch index * batch size = samples processed so far\n len(train_loader.dataset), # total number of train_set samples\n 100.*batch_idx/len(train_loader), # ratio of the two above\n loss.item()\n )\n print(log_line)\n log_lines.append(log_line)\n train_losses.append(loss.item()) # store a float, not the tensor, so loss_show can build arrays\n # Validate (using the test dataset)\n valid_mean_pts_loss = 0.0\n model.eval()# prep model for evaluation\n with torch.no_grad():\n valid_batch_cnt = 0\n for batch_idx,batch in enumerate(valid_loader):\n valid_batch_cnt += 1\n valid_img = batch['image']\n input_img = valid_img.to(device)\n # ground truth\n landmarks = batch['landmarks']\n target_pts = landmarks.to(device)\n # result\n output_pts = model(input_img)\n valid_loss = criterion(output_pts,target_pts)\n valid_mean_pts_loss += valid_loss.item()\n # Output the result\n valid_mean_pts_loss /= valid_batch_cnt * 1.0\n log_line = 'Valid: pts_loss: {:.6f}'.format(valid_mean_pts_loss)\n print(log_line)\n log_lines.append(log_line)\n valid_losses.append(valid_mean_pts_loss)\n print('=============================')\n # save model\n if args.save_model:\n saved_model_name = os.path.join(args.save_directory,\n 'detector_epoch'+'_'+str(epoch_id)+'.pt')\n torch.save(model.state_dict(), saved_model_name)\n # write log info\n with open(log_path, \"a\") as f:\n for line in log_lines:\n f.write(line + '\\n')\n return train_losses,valid_losses\n\n# 3.2 Plot the loss\ndef loss_show(train_losses,valid_losses,args):\n x = np.arange(0,args.epochs)\n train_losses = np.array(train_losses)\n t_loss = np.c_[x,train_losses]\n valid_losses = np.array(valid_losses)\n v_loss = np.c_[x, valid_losses]\n fig = plt.figure(figsize=(10, 10))\n ax = fig.subplots(nrows=1, ncols=1)\n ax.plot(t_loss[:,0],t_loss[:,1],color='red')\n ax.plot(v_loss[:, 0], v_loss[:, 1], color='green')\n plt.show()\n\n
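# NOTE (illustration, not in the original): reloading one of the per-epoch checkpoints that\n# train() saves above, e.g. for standalone evaluation:\n# model = Net()\n# model.load_state_dict(torch.load('trained_models/detector_epoch_10.pt'))\n# model.eval()\n\n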
# 4. Testing phase\n# 4.1 Compute the predicted keypoints on the test set\ndef test(valid_loader,model,criterion,device):\n model.eval()\n valid_mean_pts_loss = 0.0\n valid_batch_cnt = 0\n result = []\n for batch_idx, batch in enumerate(valid_loader):\n valid_batch_cnt += 1\n valid_img = batch['image']\n input_img = valid_img.to(device)\n # ground truth\n landmarks = batch['landmarks']\n target_pts = landmarks.to(device)\n # result\n output_pts = model(input_img)\n valid_loss = criterion(output_pts, target_pts)\n valid_mean_pts_loss += valid_loss.item()\n device2 = torch.device('cpu')\n output_pts = output_pts.to(device2)\n for i in range(len(valid_img)):\n sample = {\n 'image': valid_img[i],\n 'landmarks': output_pts[i],\n 'landmarks_truth': landmarks[i]\n }\n result.append(sample)\n # Compute the loss value\n valid_mean_pts_loss /= valid_batch_cnt * 1.0\n return result, valid_mean_pts_loss\n\n# 4.2 Plot images with the predicted keypoints against the ground truth\ndef result_show(data):\n indexes = np.random.randint(0,len(data),3)\n fig = plt.figure(figsize=(10,10))\n axes = fig.subplots(nrows=1,ncols=3)\n for i in range(3):\n sample = data[indexes[i]]\n ax = axes[i]\n img = sample['image']\n img = img[0]\n landmarks = sample['landmarks']\n landmarks = landmarks.reshape(-1, 2)\n gt_lms = sample['landmarks_truth']\n gt_lms = gt_lms.reshape(-1,2)\n ax.imshow(img,cmap='gray')\n ax.scatter(landmarks[:,0],landmarks[:,1],s=5,c='r')\n ax.scatter(gt_lms[:, 0], gt_lms[:, 1], s=5, c='g')\n plt.show()\n\n# 5. Prediction phase\ndef predict(model, valid_loader,device):\n model.eval()\n with torch.no_grad():\n for i, batch in enumerate(valid_loader):\n # forward pass: compute predicted outputs by passing inputs to the model\n img = batch['image']\n landmark = batch['landmarks']\n img = img.to(device)\n print('i: ', i)\n # generated\n output_pts = model(img)\n device2 = torch.device('cpu')\n output_pts = output_pts.to(device2)\n outputs = output_pts.numpy()[0]\n print('outputs: ', outputs)\n x = list(map(int, outputs[0: len(outputs): 2]))\n y = list(map(int, outputs[1: len(outputs): 2]))\n landmarks_generated = list(zip(x, y))\n # truth\n landmark = landmark.numpy()[0]\n x = list(map(int, landmark[0: len(landmark): 2]))\n y = list(map(int, landmark[1: len(landmark): 2]))\n landmarks_truth = list(zip(x, y))\n\n device2 = torch.device('cpu')\n img = img.to(device2)\n img = img.numpy()[0].transpose(1, 2, 0)\n img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n for landmark_truth, landmark_generated in zip(landmarks_truth, landmarks_generated):\n cv2.circle(img, tuple(landmark_truth), 2, (0, 0, 255), -1)\n cv2.circle(img, tuple(landmark_generated), 2, (0, 255, 0), -1)\n\n cv2.imshow(str(i), img)\n key = cv2.waitKey()\n if key == 27:\n exit()\n cv2.destroyAllWindows()\n\n
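# NOTE (illustration, not in the original): Finetune() below freezes the early layers by\n# flipping requires_grad; an equivalent way to build its optimizer over only the trainable\n# parameters would be:\n# optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=0.001, betas=(0.9, 0.99))\n\n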
# 6. Fine-tuning phase\ndef Finetune(args, train_loader, valid_loader, model, criterion, device):\n global loss\n print(\"Finetuning the models from checkpoint %s\" % args.checkpoint)\n # Set up saving\n if args.save_model:\n if not os.path.exists(args.save_directory):\n os.makedirs(args.save_directory)\n # Set the number of epochs and the loss function\n epoch = args.epochs\n criterion = criterion\n # Set up the frozen layers\n for para in list(model.parameters())[0:16]: # freeze the parameters before ip2\n para.requires_grad = False # stop tracking gradients for them\n # To optimize only the ip3 layer: optimizer = optim.Adam(params=[model.ip3.weight, model.ip3.bias], lr=0.001, betas=(0.9, 0.99))\n optimizer = optim.Adam(model.parameters(), lr=0.001, betas=(0.9, 0.99))\n print(\"only the last layer -- IP3 -- will be trained, the other layers will be frozen\")\n # monitor training loss\n train_losses = []\n valid_losses = []\n log_path = os.path.join(args.save_directory, 'log_info.txt')\n if (os.path.exists(log_path)):\n os.remove(log_path)\n # Train\n for epoch_id in range(epoch):\n # training the model\n model.train()\n log_lines = []\n for batch_idx,batch in enumerate(train_loader):\n img = batch['image']\n input_img = img.to(device)\n # ground truth\n landmarks = batch['landmarks']\n target_pts = landmarks.to(device)\n # clear the gradients of all optimized variables\n optimizer.zero_grad()\n # get output\n output_pts = model(input_img)\n # get loss\n loss = criterion(output_pts,target_pts)\n # do BP automatically\n loss.backward()\n optimizer.step()\n # show log info\n if batch_idx % args.log_interval == 0:\n log_line = 'Train Epoch:{}[{}/{}({:.0f}%)]\\t pts_loss:{:.6f}'.format(\n epoch_id,\n batch_idx * len(img), # batch index * batch size = samples processed so far\n len(train_loader.dataset), # total number of train_set samples\n 100.*batch_idx/len(train_loader), # ratio of the two above\n loss.item()\n )\n print(log_line)\n log_lines.append(log_line)\n train_losses.append(loss.item())\n # Validate (using the test dataset)\n valid_mean_pts_loss = 0.0\n model.eval()# prep model for evaluation\n with torch.no_grad():\n valid_batch_cnt = 0\n for batch_idx,batch in enumerate(valid_loader):\n valid_batch_cnt += 1\n valid_img = batch['image']\n input_img = valid_img.to(device)\n # ground truth\n landmarks = batch['landmarks']\n target_pts = landmarks.to(device)\n # result\n output_pts = model(input_img)\n valid_loss = criterion(output_pts,target_pts)\n valid_mean_pts_loss += valid_loss.item()\n # Output the result\n valid_mean_pts_loss /= valid_batch_cnt * 1.0\n log_line = 'Valid: pts_loss: {:.6f}'.format(valid_mean_pts_loss)\n print(log_line)\n log_lines.append(log_line)\n valid_losses.append(valid_mean_pts_loss)\n print('=============================')\n # save model\n if args.save_model:\n saved_model_name = os.path.join(args.save_directory,\n 'detector_epoch'+'_'+str(epoch_id)+'.pt')\n torch.save(model.state_dict(), saved_model_name)\n # write log info\n with open(log_path, \"a\") as f:\n for line in log_lines:\n f.write(line + '\\n')\n return train_losses,valid_losses\n\n# 7. 
运行\nif __name__ == '__main__':\n main_test()\n", "sub_path": "stage1/Project2_detector_myself - stage1.py", "file_name": "Project2_detector_myself - stage1.py", "file_ext": "py", "file_size_in_byte": 18611, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.nn.Module", "line_number": 13, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 18, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 20, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 24, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 39, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 41, "usage_type": "name"}, {"api_name": "torch.nn.PReLU", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.AvgPool2d", "line_number": 43, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 43, "usage_type": "name"}, {"api_name": "argparse.ArgumentParser", "line_number": 78, "usage_type": "call"}, {"api_name": "torch.manual_seed", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.cuda", 
"line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 117, "usage_type": "call"}, {"api_name": "Project2_data_myself.get_train_test_set", "line_number": 121, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 122, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 122, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 123, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 123, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "torch.load", "line_number": 133, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 137, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 156, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 190, "usage_type": "call"}, {"api_name": "os.path", "line_number": 190, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 191, "usage_type": "call"}, {"api_name": "os.path", "line_number": 191, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.optim.SGD", "line_number": 199, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 199, "usage_type": "name"}, {"api_name": "torch.optim.SGD", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 201, "usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 255, "usage_type": "call"}, {"api_name": "os.path", "line_number": 255, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 266, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 268, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 270, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 275, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 310, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 310, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 311, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 311, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 325, 
"usage_type": "name"}, {"api_name": "torch.no_grad", "line_number": 330, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 339, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 352, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 355, "usage_type": "call"}, {"api_name": "cv2.COLOR_GRAY2RGB", "line_number": 355, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 357, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 358, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 360, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 361, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 364, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 372, "usage_type": "call"}, {"api_name": "os.path", "line_number": 372, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 373, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 381, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 381, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 386, "usage_type": "call"}, {"api_name": "os.path", "line_number": 386, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path", "line_number": 387, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 424, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 446, "usage_type": "call"}, {"api_name": "os.path", "line_number": 446, "usage_type": "attribute"}, {"api_name": "torch.save", "line_number": 448, "usage_type": "call"}]} +{"seq_id": "410673343", "text": "from bs4 import BeautifulSoup\nimport requests\n#import urllib\n\n\n\n#specify the url\nurl1 = \"https://www.icc-cricket.com/rankings/mens/team-rankings/odi\"\nsource = requests.get(url1).text\n#or\n#source = urllib.request.urlopen(wiki)\n\nsoup = BeautifulSoup(source,\"lxml\")\n\nprint (soup.prettify())\n\nall_tables=soup.find_all('table')\n\nprint (all_tables)\n\n_table=soup.find('table', class_='table')\n\n\n\n#Generate lists\nA=[]\nB=[]\nC=[]\nD=[]\nE=[]\ntb=_table.find(\"tbody\")\n\nfor row in tb.findAll('tr'):\n cells = row.findAll('td')\n \n A.append(cells[0].text.strip())\n B.append(cells[1].text.strip())\n C.append(cells[2].text.strip())\n D.append(cells[3].text.strip())\n E.append(cells[4].text.strip())\n \nimport pandas as pd\nfrom collections import OrderedDict\n\ncol_name = [\"position\",\"team\",\"weighted matches\",\"points\",\"rating\"]\ncol_data = OrderedDict(zip(col_name,[A,B,C,D,E]))\ndf = pd.DataFrame(col_data) \ndf.to_csv(\"team.csv\")", "sub_path": "day9/untitled2.py", "file_name": "untitled2.py", "file_ext": "py", "file_size_in_byte": 937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 9, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 13, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 46, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "229312656", "text": "import numpy as np;\nimport matplotlib.pyplot as plt;\nfrom sklearn.model_selection import train_test_split;\n\ndef normalize(X): \n mins = np.min(X, axis = 0) \n maxs = np.max(X, axis = 0) \n rng = maxs - mins;\n norm_X = 1 
- ((maxs - X)/rng)\n return norm_X,maxs,rng;\n\ndef logistic_expression(X,theta):\n h = 1/(1+np.exp((-1)*np.dot(X,theta)));\n return h;\n\ndef cost_function(X,Y,theta,h):\n J = (-1) * (np.dot(Y.T,np.log(h))+np.dot((1-Y).T,np.log(1-h)));\n return np.mean(J);\n\ndef gradient(X,Y,h):\n grad = np.dot(X.T,h-Y);\n return grad;\n\ndef gradient_descent(X,Y,theta,m,alpha = 0.01):\n h = logistic_expression(X,theta);\n cost = cost_function(X,Y,theta,h);\n change = 1;\n iterations = 1;\n while(change > 0.000001):\n old_cost = cost;\n theta = theta - (alpha * gradient(X,Y,h))/m; # average the summed gradient over the m samples\n h = logistic_expression(X,theta);\n cost = cost_function(X,Y,theta,h);\n change = old_cost-cost;\n iterations+=1;\n return theta, iterations;\n\ndef plot_reg(X, Y, theta,m): \n y = [None]*m;\n for i in range(0,m):\n if Y[i][0] == 0:\n y[i] = 'b';\n else:\n y[i] = 'r';\n \n X = X.T;\n plt.scatter(X[1],X[2],c=y,s=20);\n \n # plotting decision boundary \n x1 = np.arange(30, 100, 1) \n x2 = -(theta[0][0] + theta[1][0]*x1)/theta[2][0] \n plt.plot(x1, x2, c='g',linewidth=1.0) \n \n plt.xlabel('x1') \n plt.ylabel('x2') \n #plt.legend() \n plt.show()\n \ndef original(xmax,xrng,theta):\n beta = np.array([[0],[0],[0]],dtype='float64');\n beta[0][0] = np.sum(theta)-(theta[1][0]*xmax[0])/xrng[0]-(theta[2][0]*xmax[1])/xrng[1];\n beta[1][0] = theta[1][0]/xrng[0];\n beta[2][0] = theta[2][0]/xrng[1];\n return beta\n \ndef probability(theta,X_test,Y_test,xmax,xrng):\n X_local = 1 - ((xmax - X_test)/xrng);\n pred_prob = logistic_expression(np.hstack((np.array([np.ones(X_local.shape[0])]).T,X_local)),theta) \n pred_value = np.where(pred_prob >= .5, 1, 0)\n m=np.size(pred_value)\n for i in range(m):\n print('Applicant with scores {:2f} : {:2f} has probability of {:2f}'.format(X_test[i][0],X_test[i][1],pred_prob[i][0]))\n \ndef predict(xmax,xrng,theta):\n try:\n x1,x2 = input('Enter scores in a line with a space in between:').split(); \n except ValueError:\n print('Error: Please input two numbers in a line with space');\n predict(xmax,xrng,theta)\n return ;\n except:\n return ;\n X = np.array([float(x1),float(x2)]);\n X = 1 - ((xmax - X)/xrng);\n X = np.hstack((np.array([1]).T,X));\n print('->Probability: {:2f}'.format(logistic_expression(X,theta)[0]));\n\nif __name__=='__main__':\n data = np.loadtxt('../courseera/machine-learning-ex2/ex2/ex2data1.txt',delimiter=',');\n m = data.shape[0];\n X,xmax,xrng = normalize(data[:, :-1]) \n X = np.hstack((np.array([np.ones(m)]).T,X));\n Y = data[:,2:3]\n theta = np.array([np.zeros(np.size(X[0]))]).T;\n theta,iterations = gradient_descent(X,Y,theta,m);\n \n plot_reg(np.hstack((np.array([np.ones(m)]).T,data)),Y,original(xmax,xrng,theta),m);\n print('theta:',theta.T);\n print('no of iterations',iterations);\n print('-------------------------------------------------------------------');\n \n X_train, X_test, Y_train, Y_test = train_test_split( data[:,0:2],Y,test_size=0.05);\n #random test cases\n probability(theta,X_test,Y_test,xmax,xrng);\n print('-------------------------------------------------------------------');\n #input test case\n predict(xmax,xrng,theta)\n \n \n \n \n \n", "sub_path": "Phase 2/week 3 - 28 Jan 2019/khagesh_170102070/ex2data1.py", "file_name": "ex2data1.py", "file_ext": "py", "file_size_in_byte": 3575, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.min", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 13,
"usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 47, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 47, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 61, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 92, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.size", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 97, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 102, "usage_type": "call"}]} +{"seq_id": "408024086", "text": "import numpy as np\nimport matplotlib as mp\nimport matplotlib.pyplot as plt, mpld3\nimport matplotlib.ticker as ticker\n\nimport pickle\nfrom .. 
import utils\n\numnPrimaryColors = [[122 / 255, 0 / 255, 25 / 255],\n [255 / 255, 204 / 255, 51 / 255]]\n\numnSecondaryDark = [[0 / 255, 61 / 255, 76 / 255],\n [202 / 255, 119 / 255, 0 / 255],\n [114 / 255, 110 / 255, 32 / 255],\n [97 / 255, 99 / 255, 101 / 255],\n [145 / 255, 120 / 255, 91 / 255],\n [91 / 255, 0 / 255, 19 / 255],\n [255 / 255, 183 / 255, 30 / 255]]\n\n\numnSecondaryLight = [[0 / 255, 185 / 255, 228 / 255],\n [233 / 255, 131 / 255, 0 / 255],\n [190 / 255, 214 / 255, 0 / 255],\n [204 / 255, 204 / 255, 204 / 255],\n [226 / 255, 211 / 255, 164 / 255],\n [144 / 255, 0 / 255, 33 / 255],\n [255 / 255, 222 / 255, 122 / 255]]\n\n\ndef outputPlots(\n useINF,\n resultsDict,\n saveOutput=True,\n outputDir=None,\n axisDict=None,\n plotPropagationError=False,\n scaleByStdDev=None,\n lineWeight=2,\n legendFont=14,\n legendLineLength=10,\n legendBorderPad=2,\n outputFormat='HTML',\n clearOldPlot=True,\n placeLegend=False,\n logErrorPlot=False,\n colorCounter=0,\n colorList=None\n):\n if not colorList:\n colorList = umnSecondaryDark\n \n print()\n print(\"||=================================================||\") \n print(\"Plotting current results and saving output\")\n print(\"||=================================================||\") \n print()\n legendDict = {}\n\n if axisDict == None:\n axisDict = {}\n\n if saveOutput and outputFormat == 'SVG':\n mp.rcParams['svg.fonttype'] = 'none'\n mp.rcParams['axes.unicode_minus'] = False\n plt.rc('text', usetex=False)\n\n if axisDict is None or 'attAxis' not in axisDict:\n attitudeFigure=plt.figure(figsize=(16,9))\n print(\"generating new attitude figure\")\n if placeLegend:\n rollAxis = plt.subplot2grid((3,4), (0,0),colspan=3)\n pitchAxis = plt.subplot2grid((3,4), (1,0),colspan=3)\n yawAxis = plt.subplot2grid((3,4), (2,0),colspan=3)\n else:\n rollAxis = plt.subplot2grid((3,1), (0,0))\n pitchAxis = plt.subplot2grid((3,1), (1,0))\n yawAxis = plt.subplot2grid((3,1), (2,0))\n else:\n rollAxis = axisDict['attAxis']['roll']\n pitchAxis = axisDict['attAxis']['pitch']\n yawAxis = axisDict['attAxis']['yaw']\n \n # plt.sca(attAxis)\n\n if clearOldPlot:\n rollAxis.clear()\n pitchAxis.clear()\n yawAxis.clear()\n \n estimatedT = resultsDict['estimatedT']['value']\n rollSigma = resultsDict['estimatedAttitudeSigma_DEG']['value'][0]\n pitchSigma = resultsDict['estimatedAttitudeSigma_DEG']['value'][1]\n yawSigma = resultsDict['estimatedAttitudeSigma_DEG']['value'][2]\n \n rollError = resultsDict['attitudeError_DEG']['value'][0]\n pitchError = resultsDict['attitudeError_DEG']['value'][1]\n yawError = resultsDict['attitudeError_DEG']['value'][2]\n\n if 'attitudeError_DEG_PO' in resultsDict:\n attPO = True\n rollError_PO = resultsDict['attitudeError_DEG_PO']['value'][0]\n pitchError_PO = resultsDict['attitudeError_DEG_PO']['value'][1]\n yawError_PO = resultsDict['attitudeError_DEG_PO']['value'][2]\n else:\n attPO = False\n \n estimatedPos = resultsDict['estimatedPos']['value']\n estimatedPosStdDev = resultsDict['estimatedPosStdDev']['value']\n estimatedPosStdDev_calc = resultsDict['estimatedPosStdDev_calc']['value']\n\n if 'navVel' in resultsDict:\n navVel = resultsDict['navVel']['value']\n navVelStd = resultsDict['navVelStd']['value']\n else:\n navVel = None\n if 'navAcc' in resultsDict:\n navAcc = resultsDict['navAcc']['value']\n navAccStd = resultsDict['navAccStd']['value']\n else:\n navAcc=None\n\n\n truePos = resultsDict['truePos']['value']\n trueVel = resultsDict['trueVel']['value']\n trueAcc = resultsDict['trueAcc']['value']\n\n velocityOnlyRange = 
resultsDict['velocityOnlyRange']['value']\n \n \n stdDevColor = [0.5, 0.5, 0.5]\n legendLabelList = []\n legendLineList = []\n \n # rollAxis.set_title(r'Roll error, \\$\\pm 1 \\sigma\\$ bounds')\n estLine, = rollAxis.plot(\n estimatedT - estimatedT[0],\n rollError,\n color=colorList[0 + colorCounter],\n lw=lineWeight\n )\n legendLineList.append(estLine)\n legendLabelList.append('attitude estimate error')\n if attPO and plotPropagationError:\n propLine,=rollAxis.plot(\n estimatedT - estimatedT[0],\n rollError_PO,\n color=colorList[1 + colorCounter],\n ls='dashdot',\n lw=lineWeight\n )\n legendLineList.append(propLine)\n legendLabelList.append('inertial propagation error')\n \n sigmaLine,=rollAxis.plot(\n estimatedT-estimatedT[0],\n -rollSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight\n )\n legendLineList.append(sigmaLine)\n legendLabelList.append(r'\\$\\pm 1 \\sigma\\$')\n \n \n rollAxis.plot(\n estimatedT-estimatedT[0],\n rollSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight\n )\n rollAxis.set_ylabel(r'roll error \\$(deg)\\$')\n rollAxis.grid()\n\n if scaleByStdDev:\n myStdDev = np.std(rollError)\n myMean = np.mean(rollError)\n rollAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n \n \n # if placeLegend:\n # pitchAxis=plt.subplot2grid((3,4), (1,0),colspan=3)\n # else:\n # pitchAxis=plt.subplot2grid((3,1), (1,0))\n \n # pitchAxis.set_title(r'Pitch error, \\$\\pm 1 \\sigma\\$ bounds')\n\n pitchAxis.plot(\n estimatedT-estimatedT[0],\n pitchError,\n color=colorList[0+colorCounter],\n lw=lineWeight\n )\n if attPO and plotPropagationError:\n pitchAxis.plot(\n estimatedT - estimatedT[0],\n pitchError_PO,\n color=colorList[1+ colorCounter],\n lw=lineWeight,\n ls='dashdot'\n )\n \n pitchAxis.plot(\n estimatedT-estimatedT[0],\n pitchSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight\n )\n pitchAxis.plot(\n estimatedT-estimatedT[0],\n -pitchSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight\n )\n pitchAxis.set_ylabel(r'pitch error \\$(deg)\\$')\n\n if scaleByStdDev:\n myStdDev = np.std(pitchError)\n myMean = np.mean(pitchError)\n pitchAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n \n pitchAxis.grid()\n \n # if placeLegend:\n # yawAxis=plt.subplot2grid((3,4), (2,0),colspan=3)\n # else:\n # yawAxis=plt.subplot2grid((3,1), (2,0))\n \n # yawAxis.set_title(r'Yaw error, \\$\\pm 1 \\sigma\\$ bounds')\n yawAxis.plot(\n estimatedT-estimatedT[0],\n yawError,\n color=colorList[0 + colorCounter],\n lw=lineWeight \n )\n if attPO and plotPropagationError:\n yawAxis.plot(\n estimatedT - estimatedT[0],\n yawError_PO,\n color=colorList[1 + colorCounter],\n lw=lineWeight,\n ls='dashdot' \n )\n \n yawAxis.plot(\n estimatedT-estimatedT[0],\n yawSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight\n )\n yawAxis.plot(\n estimatedT-estimatedT[0],\n -yawSigma,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight \n )\n yawAxis.grid()\n yawAxis.set_ylabel(r'yaw error \\$(deg)\\$')\n yawAxis.set_xlabel(r'time \\$(s)\\$')\n if scaleByStdDev:\n myStdDev = np.std(yawError)\n myMean = np.mean(yawError)\n yawAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n # plt.subplots_adjust(hspace=.5)\n\n\n rollAxis.yaxis.set_major_formatter(ticker.FormatStrFormatter('%2.2f'))\n pitchAxis.yaxis.set_major_formatter(ticker.FormatStrFormatter('%2.2f'))\n yawAxis.yaxis.set_major_formatter(ticker.FormatStrFormatter('%2.2f'))\n\n rollAxis.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.f'))\n 
pitchAxis.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.f'))\n yawAxis.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.f'))\n \n rollAxis.yaxis.set_minor_formatter(ticker.FormatStrFormatter('%2.2f'))\n pitchAxis.yaxis.set_minor_formatter(ticker.FormatStrFormatter('%2.2f'))\n yawAxis.yaxis.set_minor_formatter(ticker.FormatStrFormatter('%2.2f'))\n\n rollAxis.xaxis.set_minor_formatter(ticker.FormatStrFormatter('%0.f'))\n pitchAxis.xaxis.set_minor_formatter(ticker.FormatStrFormatter('%0.f'))\n yawAxis.xaxis.set_minor_formatter(ticker.FormatStrFormatter('%0.f'))\n plt.tight_layout()\n\n if placeLegend:\n # plt.subplot2grid((3,4), (3,0),colspan=3) \n legendPlot = plt.subplot2grid((3,4), (0,3),rowspan=3)\n legendPlot.axis('off')\n myLegend=legendPlot.legend(\n legendLineList,\n legendLabelList,\n bbox_to_anchor=(1, 1),\n fancybox=True,\n shadow=True,\n handlelength=legendLineLength,\n borderpad=legendBorderPad,\n )\n plt.setp(myLegend.get_texts(), fontsize=30)\n \n else:\n legendDict['attitude'] = {'lines': legendLineList, 'labels': legendLabelList}\n \n if saveOutput:\n if outputFormat == 'HTML':\n mpld3.save_html(rollAxis.get_figure(), outputDir + '/attitude.html')\n elif outputFormat == 'SVG':\n plt.savefig(outputDir + '/attitude.svg')\n \n plt.show(block=False)\n \n if axisDict is None or 'tdoaAxis' not in axisDict:\n tdoaFigure=plt.figure(figsize=(16,9))\n tdoaAxis = plt.gca()\n else:\n tdoaAxis = axisDict['tdoaAxis']\n legendLineList = []\n legendLabelList = []\n \n if clearOldPlot:\n tdoaAxis.clear()\n\n propOnlyRange = truePos - (velocityOnlyRange) - truePos[0]\n rangeError = truePos - estimatedPos\n if logErrorPlot:\n tdoaAxis.set_yscale('log')\n propOnlyRange = np.abs(propOnlyRange)\n rangeError = np.abs(rangeError)\n \n \n tdoaLine, = tdoaAxis.plot(\n estimatedT,\n rangeError,\n color=colorList[0+colorCounter], \n lw=lineWeight\n )\n legendLineList.append(tdoaLine)\n legendLabelList.append(r'range error (\$\sigma=%.2f\$)'%estimatedPosStdDev_calc)\n\n sigmaLine, = tdoaAxis.plot(\n estimatedT,\n estimatedPosStdDev,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight \n )\n legendLineList.append(sigmaLine)\n legendLabelList.append(r'estimated standard deviation (\$1\sigma\$)')\n\n if not logErrorPlot:\n tdoaAxis.plot(\n estimatedT,\n -estimatedPosStdDev,\n color=stdDevColor,\n ls='dotted',\n lw=lineWeight \n )\n tdoaAxis.yaxis.set_major_formatter(ticker.FormatStrFormatter('%2.f'))\n tdoaAxis.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.f'))\n \n tdoaAxis.yaxis.set_minor_formatter(ticker.FormatStrFormatter('%2.f'))\n tdoaAxis.xaxis.set_minor_formatter(ticker.FormatStrFormatter('%0.f'))\n\n\n # if useINF:\n # plt.plot(\n # estimatedT,\n # truePos - navPos,\n # label=(\n # 'nav filter delay error, ($\sigma = %s$)'\n # %navPosErrorStdDev\n # )\n # )\n # plt.plot(\n # estimatedT,\n # navBiasState,\n # label='bias state'\n # )\n # plt.plot(estimatedT, navPosStd, color=[0.9,0.9,0.9], label='nav filter standard deviation')\n # plt.plot(estimatedT, -navPosStd, color=[0.9,0.9,0.9])\n\n tdoaAxis.set_xlabel(r'time \$(s)\$')\n tdoaAxis.set_ylabel(r'TDOA error \$(km)\$')\n \n if plotPropagationError:\n tdoaPropOnlyLine, = tdoaAxis.plot(\n estimatedT,\n propOnlyRange,\n color=colorList[1 + colorCounter],\n ls='dashdot',\n lw=lineWeight\n )\n legendLineList.append(tdoaPropOnlyLine)\n legendLabelList.append('initial velocity error propagation')\n\n if scaleByStdDev:\n myStdDev = np.std(truePos-estimatedPos)\n myMean = np.mean(truePos-estimatedPos)\n 
tdoaAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n\n if placeLegend:\n tdoaAxis.legend(\n legendLineList,\n legendLabelList,\n bbox_to_anchor=(1, 1),\n fancybox=True,\n shadow=True,\n handlelength=legendLineLength,\n borderpad=legendBorderPad,\n )\n legendDict['tdoa'] = {'lines': legendLineList, 'labels': legendLabelList}\n \n tdoaAxis.grid()\n if saveOutput:\n if outputFormat == 'HTML':\n mpld3.save_html(tdoaAxis.get_figure(), outputDir + '/tdoa.html')\n elif outputFormat == 'SVG':\n plt.savefig(outputDir + '/tdoa.svg')\n # plt.close(tdoaFigure)\n plt.show(block=False)\n \n axisDict['tdoaAxis'] = tdoaAxis\n axisDict['attAxis'] = {'roll': rollAxis, 'pitch': pitchAxis, 'yaw': yawAxis}\n\n \n velocityFigure = None\n if not np.any(navVel==None):\n if axisDict is None or 'velocityAxis' not in axisDict:\n velocityFigure=plt.figure(figsize=(16,9))\n velocityAxis = plt.gca()\n else:\n velocityAxis = axisDict['velocityAxis']\n if clearOldPlot:\n velocityAxis.clear()\n velError = trueVel - navVel\n if logErrorPlot:\n velError = np.abs(velError)\n velocityAxis.set_yscale('log')\n \n velocityAxis.plot(\n estimatedT,\n velError,\n label=(\n r'velocity error (\\$\\sigma = %s\\$)'\n %np.std(trueVel - navVel) \n ),\n color=colorList[0 + colorCounter],\n lw=lineWeight\n )\n velocityAxis.plot(estimatedT, navVelStd,ls='dotted', lw=lineWeight, color=[0.5,0.5,0.5], label='velocity std dev')\n\n if not logErrorPlot:\n velocityAxis.plot(estimatedT, -navVelStd, color=[0.5,0.5,0.5],ls='dotted', lw=lineWeight,)\n if scaleByStdDev:\n myStdDev = np.std(trueVel-navVel)\n myMean = np.mean(trueVel-navVel)\n velocityAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n \n velocityAxis.legend()\n\n if saveOutput:\n if outputFormat == 'HTML':\n mpld3.save_html(velocityAxis.get_figure(), outputDir + '/velocity.html')\n elif outputFormat == 'SVG':\n plt.savefig(outputDir + '/velocity.svg')\n # plt.close(velocityFigure)\n # else:\n velocityAxis.grid()\n plt.show(block=False)\n axisDict['velocityAxis'] = velocityAxis\n \n legendLineList = []\n legendLabelList = []\n \n if not np.any(navAcc==None):\n if axisDict is None or 'accelerationAxis' not in axisDict:\n accelerationFigure=plt.figure(figsize=(16,9))\n accelerationAxis = plt.gca()\n else:\n accelerationAxis = axisDict['accelerationAxis']\n \n if clearOldPlot:\n accelerationAxis.clear()\n accError = trueAcc - navAcc\n\n if logErrorPlot:\n accError = np.abs(accError)\n accelerationAxis.set_yscale('log')\n accelerationAxis.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.e'))\n accelerationAxis.xaxis.set_major_formatter(ticker.FormatStrFormatter('%0.e'))\n accLine, = accelerationAxis.plot(\n estimatedT,\n accError,\n label=(\n r'acceleration error'\n ),\n color=colorList[0 + colorCounter],\n lw=lineWeight \n )\n legendLineList.append(accLine)\n legendLabelList.append(r'acceleration error')\n \n accSigmaLine, = accelerationAxis.plot(estimatedT, navAccStd, color=[0.5,0.5,0.5], label='acceleration std dev',ls='dotted', lw=lineWeight,)\n if not logErrorPlot:\n accelerationAxis.plot(estimatedT, -navAccStd, color=[0.5,0.5,0.5],ls='dotted', lw=lineWeight,)\n legendLineList.append(accSigmaLine)\n legendLabelList.append(r'estimated standard deviation')\n accelerationAxis.set_ylabel(r'Acceleration error \\$\\frac{km}{s^2}\\$')\n accelerationAxis.set_xlabel(r'time \\$(s)\\$')\n\n if placeLegend:\n # plt.subplot2grid((3,4), (3,0),colspan=3) \n legendPlot = plt.subplot2grid((3,4), (0,3),rowspan=3)\n 
legendPlot.axis('off')\n myLegend=legendPlot.legend(\n legendLineList,\n legendLabelList,\n bbox_to_anchor=(1, 1),\n fancybox=True,\n shadow=True,\n handlelength=legendLineLength,\n borderpad=legendBorderPad,\n )\n plt.setp(myLegend.get_texts(), fontsize=legendFont)\n\n else:\n legendDict['acceleration'] = {'lines': legendLineList, 'labels': legendLabelList}\n \n if scaleByStdDev:\n myStdDev = np.std(trueAcc-navAcc)\n myMean = np.mean(trueAcc-navAcc)\n accelerationAxis.set_ylim([-scaleByStdDev*myStdDev + myMean, scaleByStdDev*myStdDev + myMean])\n accelerationAxis.grid()\n if saveOutput:\n if outputFormat == 'HTML':\n mpld3.save_html(accelerationAxis.get_figure(), outputDir + '/acceleration.html')\n elif outputFormat == 'SVG':\n plt.savefig(outputDir + '/acceleration.svg')\n # plt.close(velocityFigure)\n # else:\n plt.show(block=False)\n axisDict['accelerationAxis'] = accelerationAxis\n \n # figureDict = {\n # 'tdoaFigure': tdoaFigure,\n # 'velocityFigure': velocityFigure,\n # 'attFigure': attFig,\n # 'accelerationFigure': accelerationFigure\n # }\n\n return(axisDict,legendDict)\n\ndef createStdDevHeader(\n inputRecord,\n):\n \n myTableString = (\n r'\\begin{tabular}{%' + '\\n' +\n r'>{\\raggedright}p{0.1\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.15\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.2\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.12\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.12\\textwidth}%' + '\\n' +\n r'p{0.12\\textwidth}}%' + '\\n' + r'\\toprule'\n )\n \n myTableString += r'\\multirow{3}{0.1\\textwidth}{Target object} & '\n \n \n myTableString += (\n r'\\multirow{3}{0.15\\textwidth}{' +\n r'Initial velocity error (' +\n inputRecord.parameters['parameters']['correlationFilter']['internalNavFilter']['initialVelocityStdDev']['unit'] +\n ')} & '\n )\n\n myTableString += (\n r'\\multirow{3}{0.2\\textwidth}{' +\n r'Angular velocity measurement error (' +\n inputRecord.parameters['parameters']['dynamicsModel']['omegaStdDev']['unit'] +\n ')} & '\n )\n \n myTableString += r'\\multicolumn{3}{c}{Initial attitude (' + inputRecord.parameters['parameters']['dynamicsModel']['initialAttitudeStdDev']['DEC']['unit'] + r')} \\\\' + '\\n'\n\n myTableString += r'\\cmidrule(l){4-6}' + '\\n'\n myTableString += r'& & & roll & pitch & yaw\\\\' + '\\n'\n myTableString += r'& & & & & \\\\' + '\\n'\n myTableString += r'\\midrule\\\\' + '\\n'\n return (myTableString)\n\ndef addInputToTable(\n resultsDict, \n inputRecord,\n header\n): \n if 'pulsarName' in resultsDict:\n header += resultsDict['pulsarName'] + r' & '\n else: \n header += inputRecord['parameters']['filesAndDirs']['targetObject']['value'] + r' & '\n \n header += r'\\multirow{2}{*}{%.2e} &' %inputRecord['parameters']['correlationFilter']['internalNavFilter']['initialVelocityStdDev']['value']\n \n header += r'\\multirow{2}{*}{%.2e} &' %inputRecord['parameters']['dynamicsModel']['omegaStdDev']['value']\n \n header += r'\\multirow{2}{*}{%.2e} &' %(inputRecord['parameters']['dynamicsModel']['initialAttitudeStdDev']['roll']['value'])\n header += r'\\multirow{2}{*}{%.2e} &' %(inputRecord['parameters']['dynamicsModel']['initialAttitudeStdDev']['DEC']['value'])\n header += r'\\multirow{2}{*}{%.2e}' %(inputRecord['parameters']['dynamicsModel']['initialAttitudeStdDev']['RA']['value'])\n \n header += r'\\\\'\n header += '\\n'\n if isinstance(inputRecord['parameters']['filesAndDirs']['observationID']['value'], list):\n header += r' (Obs. 
ID '\n firstObs = True\n for obsID in inputRecord['parameters']['filesAndDirs']['observationID']['value']:\n if not firstObs:\n header+=', '\n firstObs = False\n header+= r'%i' %obsID\n \n header += r')& & & & &\\\\'\n \n else:\n header += r' (Obs. ID %i)& & & & &\\\\' %inputRecord['parameters']['filesAndDirs']['observationID']['value']\n header += '\\n'\n return header\n \n\ndef createResultsHeader(\n resultsDict\n):\n myTableString = (\n r'\\begin{tabular}{%' + '\\n' +\n r'>{\\raggedright}p{0.1\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.15\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.2\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.12\\textwidth}%' + '\\n' +\n r'>{\\raggedright}p{0.12\\textwidth}%' + '\\n' +\n r'p{0.12\\textwidth}}%' + '\\n' + r'\\toprule'\n )\n \n # myTableString = (\n # r'\\begin{tabular}{%' + '\\n' +\n # r'>{\\raggedright}p{0.15\\textwidth}%' + '\\n' +\n # r'>{\\raggedright}p{0.1\\textwidth}%' + '\\n' +\n # r'>{\\raggedright}p{0.2\\textwidth}%' + '\\n' +\n # r'>{\\raggedright}p{0.1\\textwidth}%' + '\\n' +\n # r'>{\\raggedright}p{0.1\\textwidth}%' + '\\n' +\n # r'p{0.1\\columnwidth}}%' + '\\n' + r'\\toprule'\n # )\n\n myTableString += r'\\multirow{3}{0.15\\textwidth}{Target object} & '\n \n myTableString += (\n r'\\multirow{3}{0.1\\textwidth}{' +\n r'Runtime (' +\n resultsDict['estimatedT']['unit'] +\n ')} & '\n )\n \n myTableString += (\n r'\\multirow{3}{0.2\\textwidth}{' +\n r'Range estimate error standard deviation (' +\n resultsDict['estimatedPosStdDev_calc']['unit'] +\n ')} & '\n )\n\n myTableString += r'\\multicolumn{3}{c}{\\multirow{2}{0.36\\textwidth}{Attitude estimate error standard deviation (' + resultsDict['estimatedAttitudeSigma_DEG']['unit'] + r')}} \\\\' + '\\n'\n\n myTableString += r'& & & & & \\\\' + '\\n'\n myTableString += r'\\cmidrule(l){4-6}' + '\\n'\n\n myTableString += r'& & & roll & pitch & yaw\\\\' + '\\n'\n myTableString += r'\\midrule\\\\' + '\\n'\n return (myTableString)\n\ndef addResultsToTable(\n resultsDict, inputs, header\n):\n \n if 'pulsarName' in resultsDict:\n header += resultsDict['pulsarName'] + r' & '\n else: \n header += inputs['parameters']['filesAndDirs']['targetObject']['value'] + r' & '\n \n header += r'\\multirow{2}{*}{%.2e} &' %(resultsDict['estimatedT']['value'][-1] - resultsDict['estimatedT']['value'][0])\n \n header += r'\\multirow{2}{*}{%.2e} &' %resultsDict['estimatedPosStdDev_calc']['value']\n header += r'\\multirow{2}{*}{%.2e} &' %(np.std(resultsDict['attitudeError_DEG']['value'][0]) + np.abs(np.mean(resultsDict['attitudeError_DEG']['value'][0])))\n header += r'\\multirow{2}{*}{%.2e} &' %(np.std(resultsDict['attitudeError_DEG']['value'][1]) + np.abs(np.mean(resultsDict['attitudeError_DEG']['value'][1])))\n header += r'\\multirow{2}{*}{%.2e}' %(np.std(resultsDict['attitudeError_DEG']['value'][2]) + np.abs(np.mean(resultsDict['attitudeError_DEG']['value'][2])))\n header += r'\\\\'\n header += '\\n'\n if isinstance(inputs['parameters']['filesAndDirs']['observationID']['value'], list):\n header += r' (Obs. ID '\n firstObs = True\n for obsID in inputs['parameters']['filesAndDirs']['observationID']['value']:\n if not firstObs:\n header+=', '\n firstObs = False\n header+= r'%i' %obsID\n \n header += r')& & & &\\\\'\n \n else:\n header += r' (Obs. 
ID %i)& & & &\\\\' %inputs['parameters']['filesAndDirs']['observationID']['value']\n header += '\\n'\n return header\n\n\ndef createResultsDict(\n mySpacecraft,\n ureg,\n estimatedT,\n tdoa,\n attitude,\n velocityOnlyRangeTruncated,\n pulsarName,\n attitudePO=None,\n useINF=False,\n saveOutput=True,\n outputDir=None\n):\n rad2deg = 180/np.pi\n estimatedRoll = np.array(attitude['roll'])\n estimatedPitch = np.array(attitude['pitch'])\n estimatedYaw = np.array(attitude['yaw'])\n rollSigma = np.array(attitude['rollSigma'])\n pitchSigma = np.array(attitude['pitchSigma'])\n yawSigma = np.array(attitude['yawSigma'])\n\n trueAtt = np.array(mySpacecraft.dynamics.attitude(\n estimatedT+mySpacecraft.tStart, returnQ=False)\n )\n\n trueAtt_DEG = trueAtt * rad2deg\n\n estimateAttitude_DEG = [\n estimatedRoll*rad2deg,\n estimatedPitch*rad2deg,\n estimatedYaw*rad2deg\n ]\n\n \n estimatedAttitudeSigma_DEG = [\n rollSigma*rad2deg,\n pitchSigma*rad2deg,\n yawSigma*rad2deg\n ]\n\n rollError_DEG = np.array(utils.eulerAngleDiff(estimatedRoll, trueAtt[:,0])) * rad2deg\n pitchError_DEG = np.array(utils.eulerAngleDiff(estimatedPitch, trueAtt[:,1])) * rad2deg\n yawError_DEG = np.array(utils.eulerAngleDiff(estimatedYaw, trueAtt[:,2])) * rad2deg\n\n attitudeError_DEG = [rollError_DEG, pitchError_DEG, yawError_DEG]\n\n if attitudePO is not None:\n roll_PO = np.array(attitudePO['roll'])\n pitch_PO = np.array(attitudePO['pitch'])\n yaw_PO = np.array(attitudePO['yaw'])\n \n estimateAttitude_DEG_PO = [\n roll_PO*rad2deg,\n pitch_PO*rad2deg,\n yaw_PO*rad2deg\n ]\n\n rollError_DEG_PO = np.array(utils.eulerAngleDiff(roll_PO, trueAtt[:,0])) * rad2deg\n pitchError_DEG_PO = np.array(utils.eulerAngleDiff(pitch_PO, trueAtt[:,1])) * rad2deg\n yawError_DEG_PO = np.array(utils.eulerAngleDiff(yaw_PO, trueAtt[:,2])) * rad2deg\n\n attitudeError_DEG_PO = [rollError_DEG_PO, pitchError_DEG_PO, yawError_DEG_PO]\n else:\n attitudeError_DEG_PO = None\n \n estimatedTDOA = np.array(tdoa['TDOA'])\n estimatedTDOAStd = np.array(tdoa['TDOAStd'])\n\n trueTDOA = np.array([\n mySpacecraft.dynamics.position(t + mySpacecraft.tStart).dot(tdoa['unitVec']) for t in estimatedT\n ])\n trueVel = np.array([\n mySpacecraft.dynamics.velocity(t + mySpacecraft.tStart).dot(tdoa['unitVec']) for t in estimatedT\n ])\n \n trueAcc = np.array([\n mySpacecraft.dynamics.acceleration(t + mySpacecraft.tStart).dot(tdoa['unitVec']) for t in estimatedT\n ])\n\n truePos = trueTDOA - trueTDOA[0]\n \n\n if len(tdoa['vel'])>0:\n navVel = np.array(tdoa['vel'])\n navVelStd = np.array(tdoa['velStd'])\n navVelErrorStdDev = np.std(navVel - trueVel)\n \n if len(tdoa['acc'])>0:\n navAcc = np.array(tdoa['acc'])\n navAccStd = np.array(tdoa['accStd'])\n\n estimatedPos = (estimatedTDOA * ureg.seconds * ureg.speed_of_light).to(ureg('km')).magnitude\n if not np.any(tdoa['peakLock']):\n meanDiff = np.mean(estimatedPos - truePos)\n else:\n meanDiff = np.mean(\n [eP-tP for tP, eP, pL in zip(truePos, estimatedPos, tdoa['peakLock']) if pL]\n )\n \n estimatedPos = estimatedPos - meanDiff\n estimatedPosStdDev = (\n estimatedTDOAStd * ureg.seconds * ureg.speed_of_light\n ).to(ureg.km).magnitude\n\n\n estimatedPosStdDev_calc = np.std(\n [tP - eP for tP, eP, pL in zip(truePos, estimatedPos, tdoa['peakLock']) if pL]\n )\n \n \n resultsDict = {}\n\n if len(tdoa['vel']) > 0: # navVel is only defined when velocity data exists\n resultsDict['navVel'] = {\n 'value': navVel,\n 'comment': 'Spacecraft velocity as estimated by internal nav filter',\n 'unit': 'km/s'\n }\n\n resultsDict['navVelStd'] = {\n 'value': navVelStd,\n 'comment': 'Spacecraft velocity standard deviation estimated by internal nav filter',\n
'unit': 'km/s'\n }\n\n resultsDict['navVelErrorStdDev'] = {\n 'value': navVelErrorStdDev,\n 'comment':'Standard deviation of spacecraft velocity estimate error',\n 'unit':'km/s'\n }\n \n if len(tdoa['acc']) > 0: # navAcc is only defined when acceleration data exists\n resultsDict['navAcc'] = {\n 'value': navAcc,\n 'comment': 'Spacecraft acceleration as estimated by internal nav filter',\n 'unit': 'km/s^2'\n }\n\n resultsDict['navAccStd'] = {\n 'value': navAccStd,\n 'comment': 'Spacecraft acceleration standard deviation estimated by internal nav filter',\n 'unit': 'km/s^2'\n }\n resultsDict['velocityOnlyRange'] = {\n 'value': velocityOnlyRangeTruncated,\n 'comment':'Range from velocity propagation',\n 'unit':'km'\n }\n\n\n\n resultsDict['truePos'] = {\n 'value': truePos,\n 'comment': 'True Spacecraft range',\n 'unit': 'km'\n }\n resultsDict['trueVel'] = {\n 'value': trueVel,\n 'comment': 'True Spacecraft velocity',\n 'unit': 'km/s'\n }\n resultsDict['trueAcc'] = {\n 'value': trueAcc,\n 'comment': 'True Spacecraft acceleration',\n 'unit': 'km/s^2'\n }\n resultsDict['estimatedPos'] = {\n 'value': estimatedPos,\n 'comment': 'Estimated spacecraft range (unfiltered)',\n 'unit': 'km'\n }\n resultsDict['estimatedPosStdDev'] = {\n 'value': estimatedPosStdDev,\n 'comment': 'Standard deviation of estimated spacecraft range (unfiltered)',\n 'unit': 'km'\n }\n resultsDict['estimatedPosStdDev_calc'] = {\n 'value': estimatedPosStdDev_calc,\n 'comment': 'Standard deviation of estimated range (true)',\n 'unit': 'km'\n }\n\n resultsDict['trueAtt_DEG'] = {\n 'value': trueAtt_DEG,\n 'comment': 'True attitude solution',\n 'unit': 'degrees'\n }\n resultsDict['estimatedAttitude_DEG'] = {\n 'value': estimateAttitude_DEG,\n 'comment': 'Attitude estimate',\n 'unit': 'degrees'\n }\n resultsDict['attitudeError_DEG'] = {\n 'value': attitudeError_DEG,\n 'comment': 'Attitude estimate error',\n 'unit': 'degrees'\n }\n\n if attitudeError_DEG_PO is not None:\n resultsDict['attitudeError_DEG_PO'] = {\n 'value': attitudeError_DEG_PO,\n 'comment': 'Attitude estimate error from propagation only',\n 'unit': 'degrees'\n }\n \n resultsDict['estimatedAttitudeSigma_DEG'] = {\n 'value': estimatedAttitudeSigma_DEG,\n 'comment': 'Attitude estimate standard deviation',\n 'unit': 'degrees'\n }\n\n resultsDict['peakLock'] = {\n 'value': tdoa['peakLock'],\n 'comment': 'Indication of peak lock',\n 'unit': ''\n }\n resultsDict['estimatedT'] = {\n 'value': estimatedT,\n 'comment': 'Time',\n 'unit': 's'\n }\n resultsDict['pulsarName'] = pulsarName\n \n if saveOutput:\n pickle.dump( resultsDict, open( outputDir + \"/data.p\", \"wb\" ) )\n\n return(resultsDict)\n", "sub_path": "modest/plots/chandraPlots.py", "file_name": "chandraPlots.py", "file_ext": "py", "file_size_in_byte": 31789, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.rcParams", "line_number": 63, "usage_type": "attribute"}, {"api_name": "matplotlib.rcParams", "line_number": 64, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name":
"matplotlib.pyplot.subplot2grid", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 220, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 266, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 271, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 271, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 272, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 273, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 273, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 275, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 275, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 276, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 276, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 277, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 277, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 279, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 279, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 280, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 280, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 281, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 281, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 283, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 283, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 284, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 284, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 285, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 285, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 286, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 286, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 290, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 290, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 301, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 301, "usage_type": "name"}, {"api_name": "mpld3.save_html", "line_number": 308, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 310, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 310, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 312, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 312, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 315, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 315, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 316, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 316, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 329, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 330, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 360, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 360, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 361, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 361, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 363, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 363, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 364, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 364, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 400, "usage_type": "call"}, {"api_name": "mpld3.save_html", "line_number": 418, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 420, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 420, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 422, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 422, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 429, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 431, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 431, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 458, "usage_type": "call"}, {"api_name": "mpld3.save_html", "line_number": 465, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 467, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 467, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 471, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 471, "usage_type": "name"}, {"api_name": "numpy.any", "line_number": 477, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 479, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 479, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 480, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 480, "usage_type": "name"}, {"api_name": "numpy.abs", "line_number": 489, "usage_type": "call"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 491, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 491, "usage_type": "name"}, {"api_name": "matplotlib.ticker.FormatStrFormatter", "line_number": 492, "usage_type": "call"}, {"api_name": "matplotlib.ticker", "line_number": 492, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 515, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 515, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.setp", "line_number": 526, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 526, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 532, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 533, "usage_type": "call"}, {"api_name": "mpld3.save_html", "line_number": 538, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 540, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 540, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 543, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 543, "usage_type": "name"}, {"api_name": "numpy.std", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 691, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 692, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 726, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 727, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 728, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 729, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 730, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 731, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 732, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 734, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 753, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 754, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 755, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 760, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 761, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 762, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 770, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 771, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 772, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 778, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 779, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 781, "usage_type": "call"}, 
{"api_name": "numpy.array", "line_number": 784, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 788, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 796, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 797, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 798, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 801, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 802, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 805, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 806, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 808, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 818, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 937, "usage_type": "call"}]} +{"seq_id": "599651627", "text": "\"\"\"Tests for loading from https://www.conomy.ru/.\"\"\"\nimport pandas as pd\nimport pytest\n\nfrom poptimizer.data.adapters.loaders import conomy\nfrom poptimizer.data.ports import outer\n\n\n@pytest.mark.asyncio\nasync def test_get_browser_closed(mocker):\n \"\"\"The browser is closed after use.\"\"\"\n fake_launch = mocker.patch.object(conomy.pyppeteer, \"launch\")\n async with conomy._get_browser() as browser:\n assert browser is fake_launch.return_value\n\n browser.close.assert_called_once_with() # noqa: WPS441\n\n\n@pytest.mark.asyncio\nasync def test_load_ticker_page(mocker):\n \"\"\"Navigation to the page for the ticker.\"\"\"\n fake_page = mocker.AsyncMock()\n fake_element = mocker.AsyncMock()\n fake_page.xpath.return_value = [fake_element]\n\n await conomy._load_ticker_page(fake_page, \"TICKER\")\n\n fake_page.goto.assert_called_once_with(conomy.SEARCH_URL)\n fake_page.xpath.assert_called_once_with(conomy.SEARCH_FIELD)\n fake_element.type.assert_called_once_with(\"TICKER\")\n\n\n@pytest.mark.asyncio\nasync def test_load_dividends_table(mocker):\n \"\"\"Loading of the table for the ticker.\"\"\"\n fake_page = mocker.AsyncMock()\n fake_element = mocker.AsyncMock()\n fake_page.xpath.return_value = [fake_element]\n\n await conomy._load_dividends_table(fake_page)\n\n fake_page.xpath.assert_called_once_with(conomy.DIVIDENDS_MENU)\n fake_element.click.assert_called_once_with()\n\n\n@pytest.mark.asyncio\nasync def test_get_html(mocker):\n \"\"\"Sequential navigation and loading of the dividends html.\"\"\"\n fake_get_browser = mocker.patch.object(conomy, \"_get_browser\")\n ctx_mng = fake_get_browser.return_value.__aenter__.return_value # noqa: WPS609\n fake_page = ctx_mng.newPage.return_value\n fake_load_ticker_page = mocker.patch.object(conomy, \"_load_ticker_page\")\n mocker.patch.object(conomy, \"_load_dividends_table\")\n\n html = await conomy._get_html(\"UNAC\")\n\n fake_get_browser.assert_called_once_with()\n fake_load_ticker_page.assert_called_once_with(fake_page, \"UNAC\")\n assert html is fake_page.content.return_value\n\n\nTICKER_CASES = (\n (\"GAZP\", True),\n (\"SNGSP\", False),\n (\"WRONG\", None),\n (\"AAPL-RM\", None),\n)\n\n\n@pytest.mark.parametrize(\"ticker, answer\", TICKER_CASES)\ndef test_is_common(ticker, answer):\n \"\"\"Check that the ticker corresponds to a common share.\"\"\"\n if answer is None:\n with pytest.raises(outer.DataError, match=\"Некорректный тикер\"):\n conomy._is_common(ticker)\n else:\n assert conomy._is_common(ticker) is answer\n\n\nDESC_CASES = (\n (\"CHMF\", 7),\n (\"SNGSP\", 8),\n)\n\n\n@pytest.mark.parametrize(\"ticker, answer\", DESC_CASES)\n
def test_get_col_desc(ticker, answer):\n \"\"\"Correct construction of the column description depending on the share type.\"\"\"\n date, div = conomy._get_col_desc(ticker)\n assert date.num == 5\n assert div.num == answer\n\n\n@pytest.mark.asyncio\nasync def test_conomy_loader_wrong_name():\n \"\"\"Exception on an incorrect table name.\"\"\"\n loader = conomy.ConomyLoader()\n table_name = outer.TableName(outer.SECURITIES, \"DSKY\")\n with pytest.raises(outer.DataError, match=\"Некорректное имя таблицы для обновления\"):\n await loader.get(table_name)\n\n\nDF = pd.DataFrame(\n [[4.0], [1.0], [2.0], [None]],\n index=[\"2020-01-20\", \"2014-11-25\", \"2014-11-25\", None],\n columns=[\"BELU\"],\n)\nDF_REZ = pd.DataFrame(\n [[3.0], [4.0]],\n index=[\"2014-11-25\", \"2020-01-20\"],\n columns=[\"BELU\"],\n)\n\n\n@pytest.mark.asyncio\nasync def test_conomy_loader(mocker):\n \"\"\"Grouping and sorting of the received data.\"\"\"\n mocker.patch.object(conomy, \"_get_html\")\n mocker.patch.object(conomy, \"_get_col_desc\")\n mocker.patch.object(conomy.parser, \"get_df_from_html\", return_value=DF)\n\n loader = conomy.ConomyLoader()\n table_name = outer.TableName(outer.CONOMY, \"BELU\")\n pd.testing.assert_frame_equal(await loader.get(table_name), DF_REZ)\n", "sub_path": "poptimizer/data/adapters/loaders/tests/test_conomy.py", "file_name": "test_conomy.py", "file_ext": "py", "file_size_in_byte": 4242, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "poptimizer.data.adapters.loaders.conomy.pyppeteer", "line_number": 12, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 12, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._get_browser", "line_number": 13, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 13, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 9, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._load_ticker_page", "line_number": 26, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 26, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.SEARCH_URL", "line_number": 28, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 28, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.SEARCH_FIELD", "line_number": 29, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 29, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 19, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._load_dividends_table", "line_number": 40, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 40, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.DIVIDENDS_MENU", "line_number": 42, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 42, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 33, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 49, "usage_type": "argument"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 52, "usage_type": "argument"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 53, "usage_type": "argument"}, {"api_name":
"poptimizer.data.adapters.loaders.conomy._get_html", "line_number": 55, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 55, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 74, "usage_type": "call"}, {"api_name": "poptimizer.data.ports.outer.DataError", "line_number": 74, "usage_type": "attribute"}, {"api_name": "poptimizer.data.ports.outer", "line_number": 74, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._is_common", "line_number": 75, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 75, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._is_common", "line_number": 77, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 77, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 70, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 70, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy._get_col_desc", "line_number": 89, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 89, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 86, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 86, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.ConomyLoader", "line_number": 97, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 97, "usage_type": "name"}, {"api_name": "poptimizer.data.ports.outer.TableName", "line_number": 98, "usage_type": "call"}, {"api_name": "poptimizer.data.ports.outer", "line_number": 98, "usage_type": "name"}, {"api_name": "poptimizer.data.ports.outer.SECURITIES", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pytest.raises", "line_number": 99, "usage_type": "call"}, {"api_name": "poptimizer.data.ports.outer.DataError", "line_number": 99, "usage_type": "attribute"}, {"api_name": "poptimizer.data.ports.outer", "line_number": 99, "usage_type": "name"}, {"api_name": "pytest.mark", "line_number": 94, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 103, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 108, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 118, "usage_type": "argument"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 119, "usage_type": "argument"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.parser", "line_number": 120, "usage_type": "attribute"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 120, "usage_type": "name"}, {"api_name": "poptimizer.data.adapters.loaders.conomy.ConomyLoader", "line_number": 122, "usage_type": "call"}, {"api_name": "poptimizer.data.adapters.loaders.conomy", "line_number": 122, "usage_type": "name"}, {"api_name": "poptimizer.data.ports.outer.TableName", "line_number": 123, "usage_type": "call"}, {"api_name": "poptimizer.data.ports.outer", "line_number": 123, "usage_type": "name"}, {"api_name": "poptimizer.data.ports.outer.CONOMY", "line_number": 123, "usage_type": "attribute"}, {"api_name": "pandas.testing.assert_frame_equal", "line_number": 124, "usage_type": "call"}, {"api_name": "pandas.testing", "line_number": 124, "usage_type": "attribute"}, {"api_name": "pytest.mark", 
"line_number": 115, "usage_type": "attribute"}]} +{"seq_id": "443993350", "text": "import time\nimport requests\nfrom bs4 import BeautifulSoup\nfrom multiprocessing import Pool\n\nfrom json_io import write_json\nfrom data import IDs\n\ndef make_prices(id):\n data={}\n url = \"http://fifaonline4.nexon.com/datacenter/PlayerPriceGraph\"\n price = []\n for strong in range(1,11):\n data[\"spid\"] = id\n data[\"n1strong\"] = strong\n # 가끔 정보를 퍼올 때 0BP 로 나오는 경우들이 있어서, 0이 아닌 값이 저장될 때 까지 반복시행\n price_now = 0\n while price_now == 0:\n soup = BeautifulSoup(requests.post(url, data = data).text, \"lxml\")\n # \"--- BP\" string이 나오는 tag 찾아서 soup.select, strip으로 양쪽 공백 없애주고, [:-3]으로 \" BP\" 없애주고,\n # replace로 , 없애줘서 숫자만 뽑아낸 후 int형으로 변환\n price_now = int(soup.select(\"div > div:nth-child(2) > div > strong\")[0].text.strip()[:-3].replace(\",\",\"\"))\n #print(price_now)\n price.append(price_now)\n return {\"id\" : id, \"price\" : price}\n\n# make_prices 함수가 제대로 작동하는지 본 코드에서 직접 체크\nif __name__==\"__main__\":\n now = time.time()\n pool = Pool(4)\n datas = pool.map(make_prices, IDs[:3])\n print(time.time()-now)\n print(datas)", "sub_path": "ver.3/json_files/prices.py", "file_name": "prices.py", "file_ext": "py", "file_size_in_byte": 1298, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 19, "usage_type": "call"}, {"api_name": "time.time", "line_number": 29, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 30, "usage_type": "call"}, {"api_name": "data.IDs", "line_number": 31, "usage_type": "name"}, {"api_name": "time.time", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "575381564", "text": "from Program import Program, Sensor\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib.cm as cm\nfrom matplotlib.colors import Normalize\n\nif __name__ == \"__main__\":\n # fill color in Circle\n cmap = cm.jet # Select colormap U want\n # Declare for set range of value for normalization\n vmin = 0 \n vmax = 1\n # Normalize value for cmap\n norm = Normalize(vmin, vmax)\n\n # số lượng sensor\n n = 20\n program = Program(n)\n\n # # in ra các sensor đã random\n # print(\"CAC SENSOR DA RANDOM LA: \")\n # program.printSensors(program.sensorList)\n # print(\"**************************************\")\n\n # # in ra các sensor có thể giao tiếp với sensor trung tâm\n # print(\"CAC SENSOR CO THE GIAO TIEP VOI SENSOR TRUNG TAM\")\n # if len(program.sinkSensor.nearSensors) != 0:\n # program.printSensors(program.sinkSensor.nearSensors.keys())\n # print(\"======================================\")\n\n # ##########################\n # print(\"CAC SENSOR VA DS SENSOR CO THE GIAO TIEP VOI NO\")\n # for sensor in program.sensorList:\n # print(f\"STT: {sensor.index}\\t(X,Y) = ({sensor.coordinate.x}, {sensor.coordinate.y})\")\n # if len(sensor.nearSensors) != 0:\n # program.printSensors(sensor.nearSensors)\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n for sensor in program.sensorList:\n if len(sensor.pathToSinkSensor) != 0:\n program.findShortestPath(sensor)\n \n for sensor in program.sensorList:\n program.printAllPathAvailable(sensor)\n ###########################\n\n # danh sách các sensor có đường đi tới sink sensor\n # dùng set thay vì list để loại bỏ các sensor trùng nhau\n colored_sensor = set()\n\n for sensor in program.sensorList:\n if len(sensor.pathToSinkSensor) != 0:\n for s in 
sensor.pathToSinkSensor.keys():\n for i in s:\n colored_sensor.add(i)\n # danh sách các tọa độ x\n x_values = list()\n # danh sách các tọa độ y\n y_values = list()\n # danh sách các lable là số thứ tự của từng sensor\n labels = list()\n\n # vẽ sink sensor\n drawing_sink_sensor = plt.Circle((program.sinkSensor.coordinate.x, \n program.sinkSensor.coordinate.y), sensor.radius, color=cmap(norm(0.7)))\n # vẽ danh sách các sensor\n drawing_circle_list = list()\n # duyệt lần lượt sensorList\n for sensor in program.sensorList:\n # nếu sensor có đường đi tới sink sensor\n # thì set màu cho sensor đó\n if sensor in colored_sensor:\n drawing_circle = plt.Circle((sensor.coordinate.x, sensor.coordinate.y), sensor.radius, color=cmap(norm(0.5)))\n # ngược lại màu mặc định\n else:\n drawing_circle = plt.Circle((sensor.coordinate.x, sensor.coordinate.y), sensor.radius, color=cmap(norm(0.3)))\n \n drawing_circle_list.append(drawing_circle)\n\n x_values.append(sensor.coordinate.x)\n y_values.append(sensor.coordinate.y)\n labels.append(sensor.index)\n\n # thêm tọa độ sink sensor vào danh sách tọa độ\n x_values.append(program.sinkSensor.coordinate.x)\n y_values.append(program.sinkSensor.coordinate.y)\n\n # danh sách các điểm để vẽ\n x_points = np.array(x_values)\n y_points = np.array(y_values)\n\n figure, axes = plt.subplots()\n axes.scatter(x_points, y_points)\n axes.set_aspect(1)\n\n \n for i in drawing_circle_list:\n # vẽ danh sách các sensor đã random\n axes.add_artist(i)\n\n # vẽ sink sensor\n axes.add_artist(drawing_sink_sensor)\n\n # vẽ các số thứ tự tương ứng\n for i, txt in enumerate(labels):\n axes.annotate(txt, (x_points[i], y_points[i]))\n\n # vẽ các điểm ứng với các tọa độ các sensor đã random không có đường nối\n plt.plot(x_points, y_points, \"o\")\n # set giá trị limit cho trục x và y\n plt.xlim([0, 100])\n plt.ylim([0, 100])\n plt.show()\n\n \n \n \n \n # print(\"CAC SENSOR VA DS SENSOR CO THE GIAO TIEP VOI NO\")\n # for sensor in program.sensorList:\n # print(f\"STT: {sensor.index}\\t(X,Y) = ({sensor.coordinate.x}, {sensor.coordinate.y})\")\n # if len(sensor.nearSensors) != 0:\n # program.printSensors(sensor.nearSensors)\n # print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n\n # for sensor in program.sensorList:\n # if len(sensor.pathToSinkSensor) != 0:\n # program.findShortestPath(sensor.pathToSinkSensor)\n \n # for sensor in program.sensorList:\n # program.printAllPathAvailable(sensor)\n", "sub_path": "BTL-JAVA-1/Main.py", "file_name": "Main.py", "file_ext": "py", "file_size_in_byte": 4679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.cm.jet", "line_number": 9, "usage_type": "attribute"}, {"api_name": "matplotlib.cm", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.colors.Normalize", "line_number": 14, "usage_type": "call"}, {"api_name": "Program.Program", "line_number": 18, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 64, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 64, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Circle", "line_number": 76, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 76, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 90, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 113, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 113, "usage_type": "name"}]} +{"seq_id": "489410554", "text": "\nimport os\nimport subprocess\nimport shutil\n\nimport librosa\nimport audioread\n\nfrom acousticsim.utils import write_wav\n\nfrom ..sql import get_or_create\n\nfrom ..sql.models import (SoundFile, Discourse)\n\ndef resample_audio(filepath, new_filepath, new_sr):\n if os.path.exists(new_filepath):\n return\n sox_path = shutil.which('sox')\n if sox_path is not None:\n subprocess.call(['sox', filepath.replace('\\\\','/'), new_filepath.replace('\\\\','/'),\n 'gain', '-1', 'rate', '-I', str(new_sr)])\n else:\n sig, sr = librosa.load(filepath, sr = new_sr, mono = False)\n if len(sig.shape) > 1:\n sig = sig.T\n write_wav(sig, sr, new_filepath)\n\ndef add_discourse_sound_info(corpus_context, discourse, filepath):\n with audioread.audio_open(filepath) as f:\n sample_rate = f.samplerate\n n_channels = f.channels\n duration = f.duration\n audio_dir = corpus_context.discourse_audio_directory(discourse)\n os.makedirs(audio_dir, exist_ok = True)\n consonant_rate = 16000\n consonant_path = os.path.join(audio_dir, 'consonant.wav')\n vowel_rate = 11000\n vowel_path = os.path.join(audio_dir, 'vowel.wav')\n low_freq_rate = 2000\n low_freq_path = os.path.join(audio_dir, 'low_freq.wav')\n if sample_rate > consonant_rate:\n resample_audio(filepath, consonant_path, consonant_rate)\n else:\n shutil.copy(filepath, consonant_path)\n consonant_rate = sample_rate\n if sample_rate > vowel_rate:\n resample_audio(consonant_path, vowel_path, vowel_rate)\n else:\n shutil.copy(filepath, vowel_path)\n vowel_rate = sample_rate\n if sample_rate > low_freq_rate:\n resample_audio(vowel_path, low_freq_path, low_freq_rate)\n else:\n shutil.copy(filepath, low_freq_path)\n low_freq_rate = sample_rate\n d, _ = get_or_create(corpus_context.sql_session, Discourse, name = discourse)\n user_path = os.path.expanduser('~')\n sf = get_or_create(corpus_context.sql_session, SoundFile, filepath = filepath,\n consonant_filepath = consonant_path.replace(user_path, '~'),\n vowel_filepath = vowel_path.replace(user_path, '~'),\n low_freq_filepath = low_freq_path.replace(user_path, '~'),\n duration = duration, sampling_rate = sample_rate,\n n_channels = n_channels, discourse = d)\n\ndef setup_audio(corpus_context, data):\n if data.wav_path is None or not os.path.exists(data.wav_path):\n return\n add_discourse_sound_info(corpus_context, data.name, data.wav_path)\n", "sub_path": "polyglotdb/acoustics/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 2563, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.exists", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": 
"attribute"}, {"api_name": "shutil.which", "line_number": 18, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 20, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 23, "usage_type": "call"}, {"api_name": "acousticsim.utils.write_wav", "line_number": 26, "usage_type": "call"}, {"api_name": "audioread.audio_open", "line_number": 29, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "shutil.copy", "line_number": 44, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 49, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 54, "usage_type": "call"}, {"api_name": "sql.get_or_create", "line_number": 56, "usage_type": "call"}, {"api_name": "sql.models.Discourse", "line_number": 56, "usage_type": "argument"}, {"api_name": "os.path.expanduser", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "sql.get_or_create", "line_number": 58, "usage_type": "call"}, {"api_name": "sql.models.SoundFile", "line_number": 58, "usage_type": "argument"}, {"api_name": "os.path.exists", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "629497776", "text": "import os\nfrom time import sleep\n\nimport img2pdf as pdf\nfrom selenium import webdriver\nfrom selenium.common.exceptions import ElementNotVisibleException\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.common.exceptions import WebDriverException\n\nfrom lib import baseFunctions as bf\nfrom lib import data\nfrom lib.baseFunctions import Api\n\n# Read product codes into a list\njson_data = Api.get_json_data('product_add')\ndf = Api.read_into_dataframe(json_data, print_head=False, print_dataframe=False)\nproduct_codes = df.loc[:, 'content']\nprint(product_codes)\n\n# Choose a folder where to store screenshots of products\noutput_folder = data.FileDialog.ask_save_directory(\"Choose Output Folder\")\n\n# Selenium web scraping\ndriver = webdriver.Chrome()\nbf.Startax.init(driver)\n\n# Wait to compensate for site loading\nsleep(3) # 3 seconds\n\nsearch_box = bf.Startax.find_searchbox(driver)\n\n# Navigate to output folder\nos.chdir(output_folder)\n\n# Iterate over list and save screenshots of new products\nfor i in range(len(product_codes)):\n # bf.Startax.single_product_search(driver, product_codes[i])\n bf.Startax.search(driver, product_codes[i])\n sleep(3)\n if bf.Startax.exists_by_class(driver, \"toggle_message_div_contents\"):\n kliki_siia = driver.find_element_by_class_name(\"toggle_message_div_contents\")\n kliki_siia.click()\n try:\n product_fit = driver.find_elements_by_class_name(\"toggle_model_list\")\n for n in range(len(product_fit)):\n product_fit[n].click()\n driver.execute_script('window.scrollTo(0, 0);')\n except NoSuchElementException:\n driver.execute_script('window.scrollTo(0, 0);')\n driver.save_screenshot(str(product_codes[i]) + \".png\")\n continue\n except ElementNotVisibleException:\n driver.execute_script('window.scrollTo(0, 
0);')\n driver.save_screenshot(str(product_codes[i]) + \".png\")\n continue\n except WebDriverException:\n driver.execute_script('window.scrollTo(0, 0);')\n driver.save_screenshot(str(product_codes[i]) + \".png\")\n continue\n driver.execute_script('window.scrollTo(0, 0);')\n driver.save_screenshot(str(product_codes[i]) + \".png\")\n sleep(3)\ndriver.quit()\n\n# Create PDF file from screenshots\nimagelist = os.listdir(output_folder)\npdf_bytes = pdf.convert(imagelist)\n\noutput_pdf = open('uued_tooted.pdf', 'wb')\noutput_pdf.write(pdf_bytes)\n\n# Remove image files\nfor image in imagelist:\n os.remove(os.path.join(output_folder, image))\n", "sub_path": "startax/product_add.py", "file_name": "product_add.py", "file_ext": "py", "file_size_in_byte": 2514, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "lib.baseFunctions.Api.get_json_data", "line_number": 15, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Api", "line_number": 15, "usage_type": "name"}, {"api_name": "lib.baseFunctions.Api.read_into_dataframe", "line_number": 16, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Api", "line_number": 16, "usage_type": "name"}, {"api_name": "lib.data.FileDialog.ask_save_directory", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.data.FileDialog", "line_number": 21, "usage_type": "attribute"}, {"api_name": "lib.data", "line_number": 21, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 24, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 24, "usage_type": "name"}, {"api_name": "lib.baseFunctions.Startax.init", "line_number": 25, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax", "line_number": 25, "usage_type": "attribute"}, {"api_name": "lib.baseFunctions", "line_number": 25, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax.find_searchbox", "line_number": 30, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax", "line_number": 30, "usage_type": "attribute"}, {"api_name": "lib.baseFunctions", "line_number": 30, "usage_type": "name"}, {"api_name": "os.chdir", "line_number": 33, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax.search", "line_number": 38, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax", "line_number": 38, "usage_type": "attribute"}, {"api_name": "lib.baseFunctions", "line_number": 38, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 39, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax.exists_by_class", "line_number": 40, "usage_type": "call"}, {"api_name": "lib.baseFunctions.Startax", "line_number": 40, "usage_type": "attribute"}, {"api_name": "lib.baseFunctions", "line_number": 40, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.NoSuchElementException", "line_number": 48, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.ElementNotVisibleException", "line_number": 52, "usage_type": "name"}, {"api_name": "selenium.common.exceptions.WebDriverException", "line_number": 56, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 62, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 66, "usage_type": "call"}, {"api_name": "img2pdf.convert", "line_number": 67, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 74, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 74, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 74, "usage_type": "attribute"}]} +{"seq_id": "266185252", "text": "import logging\nfrom typing import Optional\nfrom urllib.parse import urljoin\n\nfrom httpx import AsyncClient, HTTPError\n\nfrom app.exceptions import GetArgoWorkflowRequestFailed\n\nlogger = logging.getLogger(__name__)\n\n\nclass ArgoWorkflowClient:\n\n def __init__(self, host: str, namespace: str):\n super().__init__()\n self.url = f'http://{host}/api/v1/workflows/{namespace}/'\n\n async def get_workflow_metadata(self, workflow_name: str) -> Optional[dict]:\n \"\"\"\n Connects to Argo workflow host and return metadata about the given workflow\n \"\"\"\n async with AsyncClient() as client:\n try:\n resp = await client.get(urljoin(self.url, f'{workflow_name}?fields=metadata.labels'))\n except HTTPError as err:\n logger.error(f\"Error while requesting {err.request.url}\")\n raise GetArgoWorkflowRequestFailed(workflow_name, self.url)\n\n if resp.status_code == 404:\n logger.error(f\"Could not find argo workflow with name {workflow_name}\")\n return None\n\n if resp.status_code != 200:\n msg = f\"Something went wrong when getting metadata for workflow {workflow_name}\"\n logger.error(msg)\n raise GetArgoWorkflowRequestFailed(workflow_name, self.url)\n\n return resp.json()\n", "sub_path": "mottak-arkiv-service/app/connectors/argo_workflow/argo_workflow_client.py", "file_name": "argo_workflow_client.py", "file_ext": "py", "file_size_in_byte": 1363, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 9, "usage_type": "call"}, {"api_name": "httpx.AsyncClient", "line_number": 22, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 24, "usage_type": "call"}, {"api_name": "httpx.HTTPError", "line_number": 25, "usage_type": "name"}, {"api_name": "app.exceptions.GetArgoWorkflowRequestFailed", "line_number": 27, "usage_type": "call"}, {"api_name": "app.exceptions.GetArgoWorkflowRequestFailed", "line_number": 36, "usage_type": "call"}, {"api_name": "typing.Optional", "line_number": 18, "usage_type": "name"}]} +{"seq_id": "267445645", "text": "# original Copyright (C) 2008-2014 Ruben decrop\n# modifications Copyright (C) 2015 Chessdevil consulting\n\n__author__ = 'ruben'\n\nimport logging\nlog = logging.getLogger(__name__)\n\nimport io\nfrom reddevil.models import RdDocument, RdField\n\nclass Player(RdDocument):\n chesstitle = RdField()\n firstname = RdField()\n name = RdField()\n id_national = RdField()\n rating = RdField()\n pairingix = RdField()\n\nclass SinglePairing(RdDocument):\n whitepix = RdField()\n blackpix = RdField()\n result = RdField()\n\nclass SingleStanding(RdDocument):\n pix = RdField()\n ngames = RdField()\n points = RdField()\n\nclass PairingRound(RdDocument):\n round = RdField()\n details = RdField(default=[])\n\nclass StandingsRound(RdDocument):\n round = RdField()\n details = RdField(default=[])\n\nclass Tournament(RdDocument):\n category = RdField()\n players = RdField(default=[])\n pairings = RdField(default=[])\n standings = RdField(default=[])\n\n _colname = 'tournament'\n\n @classmethod\n def create(cls, cat, textfile):\n \"\"\"\n creates a photo\n :param name: the category\n :param textfile: textfiel with all players\n :return: the created tournament\n \"\"\"\n from bjk2016.models.subscription import Subscription\n # skip 3 lines\n textfile.stream.readline()\n textfile.stream.readline()\n textfile.stream.readline()\n 
textfile.stream.readline()\n\n trn = Tournament()\n trn.category = cat\n i = 0\n for l in textfile.stream.readlines():\n i += 1\n flds = l.decode('latin-1').strip(',').split(',')\n p = Player()\n p.id_national = flds[6].strip().lstrip('0')\n p.pairingix = flds[2].strip()\n s = Subscription.mongo_find_one({'id_national':p.id_national})\n if s:\n p.name = s.name\n p.firstname = s.firstname\n p.chesstitle = s.chesstitle\n p.rating = s.rating\n trn.players.append(p)\n else:\n log.error('no player for id_national: %s', p.id_national)\n trn.mongo_save()\n log.info('%d players added', i)\n return trn\n\n def uploadpairing(self, round, textfile):\n \"\"\"\n process a upload Pairing\n \"\"\"\n\n parnd = PairingRound()\n parnd.details = []\n parnd.round = round\n for i,p in enumerate(self.pairings):\n if p['round'] == round:\n log.info('replacing round %s', round)\n self.pairings[i] = parnd\n break\n else:\n self.pairings.append(parnd)\n\n # skip 3 lines\n textfile.stream.readline()\n textfile.stream.readline()\n textfile.stream.readline()\n for l in textfile.stream.readlines():\n flds = l.decode('latin-1').strip(',').split(',')\n sp = SinglePairing()\n if len(flds) < 5:\n continue\n if len(flds) == 5: #afwezig\n sp.whitepix = flds[1][1:-1].strip()\n sp.blackpix = -2\n parnd.details.append(sp)\n continue\n if len(flds) == 6: #bye\n sp.whitepix = flds[2][1:-1].strip()\n sp.blackpix = -1\n parnd.details.append(sp)\n continue\n sp.whitepix = flds[2][1:-1].strip()\n sp.blackpix = flds[5][1:-1].strip()\n sp.result = flds[6].strip()\n parnd.details.append(sp)\n log.info('category %s: %d pairings in round %s', self.category,\n len(parnd.details), round)\n self.mongo_save()\n\n\n def uploadstandings(self, round, textfile):\n \"\"\"\n process a upload Pairing\n \"\"\"\n strnd = StandingsRound()\n strnd.details = []\n strnd.round = round\n for i,p in enumerate(self.standings):\n if p['round'] == round:\n log.info('replacing round %s', round)\n self.standings[i] = strnd\n break\n else:\n self.standings.append(strnd)\n\n # skip 4 lines\n textfile.stream.readline()\n textfile.stream.readline()\n textfile.stream.readline()\n\n # read fieldnames\n fldnames = textfile.stream.readline().decode('latin-1').strip()\\\n .split(',')\n for i,name in enumerate(fldnames):\n fldnames[i] = name.strip()\n\n for l in textfile.stream.readlines():\n flds = l.decode('latin-1').split(',')\n if len(flds) < 4:\n continue\n ss = SingleStanding()\n i += 1\n for i,f in enumerate(flds):\n fldname = fldnames[i]\n if fldname == 'Rnk':\n ss.pix = f.strip()\n continue\n if fldname == 'Ptn':\n ss.points = f.strip()\n continue\n if fldname == 'P':\n ss.ngames = f.strip()\n continue\n if fldname in ['N', 'Naam','ELO', 'Titel', '','V/M', 'Nr']:\n continue\n ss[fldname] = f\n\n strnd.details.append(ss)\n log.info('category %s: %d standings in round %s', self.category,\n len(strnd.details), round)\n self.mongo_save()\n\n\n\n\n", "sub_path": "bjk2016/models/tournament.py", "file_name": "tournament.py", "file_ext": "py", "file_size_in_byte": 5392, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 7, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 12, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 13, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 14, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 15, "usage_type": 
"call"}, {"api_name": "reddevil.models.RdField", "line_number": 16, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 17, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 18, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 20, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 21, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 22, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 23, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 25, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 26, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 27, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 28, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 30, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 31, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 32, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 34, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 35, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 36, "usage_type": "call"}, {"api_name": "reddevil.models.RdDocument", "line_number": 38, "usage_type": "name"}, {"api_name": "reddevil.models.RdField", "line_number": 39, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 40, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 41, "usage_type": "call"}, {"api_name": "reddevil.models.RdField", "line_number": 42, "usage_type": "call"}, {"api_name": "{'Subscription': 'bjk2016.models.subscription.Subscription'}", "line_number": 61, "usage_type": "call"}, {"api_name": "bjk2016.models.subscription.Subscription.mongo_find_one", "line_number": 70, "usage_type": "call"}, {"api_name": "bjk2016.models.subscription.Subscription", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "404734263", "text": "'''\nhttps://github.com/PacktPublishing/Python-Real-World-Machine-Learning\nhttp://scikit-learn.org/stable/auto_examples/applications/plot_stock_market.html\nCluster 1 --> CL\nCluster 2 --> ES, NQ\nCluster 3 --> BABA, NTES, BIDU\nCluster 4 --> SE\nCluster 5 --> NVDA, AVGO, INTC, MU, SMH\nCluster 6 --> SPX, AAPL, AMZN, MSFT, GOOGL, FB, ADBE, CRM, NFLX, TSLA, SPY, IWM, QQQ, MA, V, HD, MMM\nCluster 7 --> JPM, BLK, WFC, MS, GS\nCluster 8 --> JNJ, MRK\nCluster 9 --> PG\nCluster 10 --> WMT\n'''\n\n\n\n# import json\n# import datetime\n# from matplotlib.finance import quotes_historical_yahoo_ochl as quotes_yahoo\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import LineCollection\nimport pandas\nimport numpy as np\nfrom sklearn import covariance, cluster, manifold\nimport os, sys\nsys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), \"../\"))\n#import tps_common,tps_feeder\n\nfrom tps import common, ohlc_file\n\nMIN_LEN_OHLC = 65535\n\n'''\nstock_dt_ohlc = np.dtype([\n (str('open'), np.float),\n (str('close'), np.float),\n (str('high'), np.float),\n (str('low'), np.float),\n (str('volume'), np.float),\n (str('aclose'), np.float)])\n\n\ndef ohlc_to_np(ohlc):\n results=[]\n for row_index, row in ohlc.iterrows():\n results.append((row['Open'],row['Close'],row['High'],row['Low'],row['Volume'],row['Adj 
Close']))\n\n results.reverse()\n d = np.array(results, dtype=stock_dt_ohlc)\n scale = d['aclose'] / d['close']\n scale[np.isinf(scale)] = np.nan\n #print(\"scale\",scale)\n d['open'] *= scale\n d['high'] *= scale\n d['low'] *= scale\n d['close'] *= scale\n #return d\n return d.view(np.recarray) # Close enough to former Bunch return\n'''\n\n\ndef model(quotes,names):\n opening_quotes = np.vstack([q['Open'][:MIN_LEN_OHLC] for q in quotes])\n closing_quotes = np.vstack([q['Close'][:MIN_LEN_OHLC] for q in quotes])\n\n #opening_quotes = np.array([quote.open for quote in quotes]).astype(np.float)\n #closing_quotes = np.array([quote.close for quote in quotes]).astype(np.float)\n # The daily fluctuations of the quotes\n delta_quotes = closing_quotes - opening_quotes\n\n # Build a graph model from the correlations\n edge_model = covariance.GraphLassoCV()\n\n \"\"\"\n transpose() rotate [[0.36,-0.93...symbol1],[1.89,-1.5..symbol2]] ->\n [[0.36,1.89],[-0.93,-1.5],...]\n \"\"\"\n\n # Standardize the data\n X = delta_quotes.copy().T\n #print(delta_quotes)\n X /= X.std(axis=0)\n # print(X,type(X))\n\n # Train the model\n with np.errstate(invalid='ignore'):\n edge_model.fit(X)\n\n # Build clustering model using affinity propagation\n _, labels = cluster.affinity_propagation(edge_model.covariance_)\n n_labels = labels.max()\n\n '''\n df = pandas.read_csv(\"ohlc_pattern.csv\", index_col=None, header=0)\n\n # Print the results of clustering\n for i in range(n_labels + 1):\n symbol_list = names[labels == i]\n df_symbols = df[df['symbol'].isin(symbol_list)]\n #print(df_symbols)\n print(\"Cluster\", i + 1, \"-->\", ', '.join(names[labels == i]), \"perf 20 days:\", round(df_symbols['perf20'].mean(),3))\n print(df[df['symbol'].isin(names)])\n '''\n\n\n # Print the results of clustering\n for i in range(n_labels + 1):\n symbol_list = names[labels == i]\n #print(df_symbols)\n print(\"Cluster\", i + 1, \"-->\", ', '.join(names[labels == i]))\n\n\n #pass\n\n # #############################################################################\n # Find a low-dimension embedding for visualization: find the best position of\n # the nodes (the stocks) on a 2D plane\n\n # We use a dense eigen_solver to achieve reproducibility (arpack is\n # initiated with random vectors that we don't control). In addition, we\n # use a large number of neighbors to capture the large-scale structure.\n node_position_model = manifold.LocallyLinearEmbedding(\n n_components=2, eigen_solver='dense', n_neighbors=6)\n\n embedding = node_position_model.fit_transform(X.T).T\n\n # #############################################################################\n # Visualization\n plt.figure(1, facecolor='w', figsize=(10, 8))\n plt.clf()\n ax = plt.axes([0., 0., 1., 1.])\n plt.axis('off')\n\n # Display a graph of the partial correlations\n partial_correlations = edge_model.precision_.copy()\n d = 1 / np.sqrt(np.diag(partial_correlations))\n partial_correlations *= d\n partial_correlations *= d[:, np.newaxis]\n non_zero = (np.abs(np.triu(partial_correlations, k=1)) > 0.02)\n\n # Plot the nodes using the coordinates of our embedding\n cmap = plt.cm.get_cmap(\"Spectral\")\n #plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=plt.cm.spectral)\n plt.scatter(embedding[0], embedding[1], s=100 * d ** 2, c=labels, cmap=cmap)\n\n # Plot the edges\n start_idx, end_idx = np.where(non_zero)\n # a sequence of (*line0*, *line1*, *line2*), where::\n # linen = (x0, y0), (x1, y1), ... 
(xm, ym)\n segments = [[embedding[:, start], embedding[:, stop]]\n for start, stop in zip(start_idx, end_idx)]\n values = np.abs(partial_correlations[non_zero])\n lc = LineCollection(segments,\n zorder=0, cmap=plt.cm.hot_r,\n norm=plt.Normalize(0, .7 * values.max()))\n lc.set_array(values)\n lc.set_linewidths(15 * values)\n ax.add_collection(lc)\n\n # Add a label to each node. The challenge here is that we want to\n # position the labels to avoid overlap with other labels\n for index, (name, label, (x, y)) in enumerate(\n zip(names, labels, embedding.T)):\n\n dx = x - embedding[0]\n dx[index] = 1\n dy = y - embedding[1]\n dy[index] = 1\n this_dx = dx[np.argmin(np.abs(dy))]\n this_dy = dy[np.argmin(np.abs(dx))]\n if this_dx > 0:\n horizontalalignment = 'left'\n x = x + .002\n else:\n horizontalalignment = 'right'\n x = x - .002\n if this_dy > 0:\n verticalalignment = 'bottom'\n y = y + .002\n else:\n verticalalignment = 'top'\n y = y - .002\n plt.text(x, y, name, size=10,\n horizontalalignment=horizontalalignment,\n verticalalignment=verticalalignment,\n bbox=dict(facecolor='w',\n edgecolor=cmap(label / float(n_labels)),\n alpha=.6))\n\n plt.xlim(embedding[0].min() - .15 * embedding[0].ptp(),\n embedding[0].max() + .10 * embedding[0].ptp(), )\n plt.ylim(embedding[1].min() - .03 * embedding[1].ptp(),\n embedding[1].max() + .03 * embedding[1].ptp())\n\n plt.show()\n\n#g_ohlc_dct={}\n\n\ndef main(param):\n #tick_dct = param['symbol_list']\n #fdr = param['feeder']\n #print(param)\n df = common.load_symbol_csv('../symbol_ib.txt')\n fdrname = 'IB1D'\n quotes = []\n symbols = []\n global MIN_LEN_OHLC\n\n for index, row in df.iterrows():\n symbol = row['symbol']\n ohlc = ohlc_file.OhlcFile.get_ohlc(row, fdrname)\n #d = ohlc_to_np(ohlc)\n MIN_LEN_OHLC = min(MIN_LEN_OHLC, len(ohlc))\n #print('add %s, len=%s' %(symbol,len(ohlc)))\n quotes.append(ohlc)\n symbols.append(symbol)\n\n names = np.array(symbols)\n # print(quotes)\n model(quotes,names)\n pass\n\n\nif __name__ == '__main__':\n main(None)\n", "sub_path": "machine_learning/ohlc_pattern.py", "file_name": "ohlc_pattern.py", "file_ext": "py", "file_size_in_byte": 7406, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 27, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 65, "usage_type": "call"}, {"api_name": "sklearn.covariance.GraphLassoCV", "line_number": 73, "usage_type": "call"}, {"api_name": "sklearn.covariance", "line_number": 73, "usage_type": "name"}, {"api_name": "numpy.errstate", "line_number": 87, "usage_type": "call"}, {"api_name": "sklearn.cluster.affinity_propagation", "line_number": 91, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 91, "usage_type": "name"}, {"api_name": "sklearn.manifold.LocallyLinearEmbedding", "line_number": 123, "usage_type": "call"}, {"api_name": "sklearn.manifold", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 130, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 
130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.clf", "line_number": 131, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 131, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 132, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 132, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.newaxis", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.abs", "line_number": 140, "usage_type": "call"}, {"api_name": "numpy.triu", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.get_cmap", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 143, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 145, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.collections.LineCollection", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 155, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.Normalize", "line_number": 156, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 156, "usage_type": "name"}, {"api_name": "numpy.argmin", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 193, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "tps.common.load_symbol_csv", "line_number": 205, "usage_type": "call"}, {"api_name": "tps.common", "line_number": 205, "usage_type": "name"}, {"api_name": "tps.ohlc_file.OhlcFile.get_ohlc", "line_number": 213, "usage_type": "call"}, {"api_name": "tps.ohlc_file.OhlcFile", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tps.ohlc_file", "line_number": 213, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 220, "usage_type": "call"}]} +{"seq_id": "165785494", "text": "PLUGIN_NAME = u\"Release Format (types of mediums)\"\nPLUGIN_AUTHOR = u\"Nobody Cares\"\nPLUGIN_DESCRIPTION = u\"\"\"Like %media% tag this plugin gives you\nopportunity to use %~releaseformat% that returns formated string with\nall types of mediums in release (example: 4×CD + 
DVD-Video)\"\"\"\nPLUGIN_VERSION = \"0.1\"\nPLUGIN_API_VERSIONS = [\"0.9.0\", \"0.10\", \"0.15\", \"2.0\"]\n\nfrom collections import OrderedDict\n\nfrom picard.metadata import register_album_metadata_processor\nfrom picard import log\n\n\ndef convert_to_str(item):\n medium_format, medium_count = item\n if medium_count == 1:\n return medium_format\n return \"%d×%s\" % (medium_count, medium_format)\n\ndef release_format(album, metadata, release):\n mediums = OrderedDict()\n for medium in release.get(\"media\") or []:\n fmt = medium.get(\"format\") or \"UNK\"\n if fmt not in mediums:\n mediums[fmt] = 1\n else:\n mediums[fmt] += 1\n metadata[\"~releaseformat\"] = \" + \".join(map(convert_to_str, mediums.items()))\n\n\nregister_album_metadata_processor(release_format)\n", "sub_path": "release_format.py", "file_name": "release_format.py", "file_ext": "py", "file_size_in_byte": 1059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "collections.OrderedDict", "line_number": 22, "usage_type": "call"}, {"api_name": "picard.metadata.register_album_metadata_processor", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "346131704", "text": "# Python standard libraries\nimport json\nimport os\nimport sqlite3\n\n# Third party libraries\nfrom flask import Flask, redirect, request, url_for, render_template, jsonify\nfrom flask_login import (\n LoginManager,\n current_user,\n login_required,\n login_user,\n logout_user,\n)\nfrom oauthlib.oauth2 import WebApplicationClient\nimport requests\n\n# Internal imports\nfrom db import init_db_command\nfrom user import User\n\n# Configuration\nGOOGLE_CLIENT_ID = os.environ.get(\"GOOGLE_CLIENT_ID\", None)\nGOOGLE_CLIENT_SECRET = os.environ.get(\"GOOGLE_CLIENT_SECRET\", None)\nGOOGLE_DISCOVERY_URL = (\n \"https://accounts.google.com/.well-known/openid-configuration\"\n)\n\n# Flask app setup\napp = Flask(__name__)\napp.secret_key = os.environ.get(\"SECRET_KEY\") or os.urandom(24)\n\n# User session management setup\n# https://flask-login.readthedocs.io/en/latest\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n@login_manager.unauthorized_handler\ndef unauthorized():\n return \"You must be logged in to access this content.\", 403\n\n\n# Naive database setup\ntry:\n init_db_command()\nexcept sqlite3.OperationalError:\n # Assume it's already been created\n pass\n\n# OAuth2 client setup\nclient = WebApplicationClient(GOOGLE_CLIENT_ID)\n\n# Flask-Login helper to retrieve a user from our db\n@login_manager.user_loader\ndef load_user(user_id):\n return User.get(user_id)\n\n\n@app.route(\"/\")\ndef index():\n if current_user.is_authenticated:\n return render_template(\"/survey.html\")\n else:\n return render_template(\"/login.html\")\n\n#route for results display\n@app.route(\"/results\", methods=[\"GET\", \"POST\"]) \ndef results():\n if current_user.is_authenticated:\n #when forum is posted store data into databas\n if request.method == \"POST\":\n req = request.form\n quest1 = req[\"fruit\"]\n if not quest1:\n quest1 = \"No ANS\"\n quest2 = req[\"vegan\"]\n if not quest2:\n quest2 = \"No ANS\"\n quest3 = req[\"snacks\"]\n if not quest3:\n quest3 = \"No ANS\"\n User.setQuestions(current_user.id, quest1, quest2, quest3)\n return redirect(request.url)\n \n #if not post then render the page\n else:\n return render_template(\"/results.html\")\n else:\n return redirect(url_for(\"index\"))\n \n#route for collecting info use in AJAX \n@app.route(\"/results/request\")\ndef 
results_request(): \n req = request.args.get('req', 0)\n #if req arg in quest1, quest2, or quest3 then get the user answers for the specified question.\n if req == \"quest1\":\n return jsonify({\"rtrn\" : User.getQuestion(current_user.id, 1)})\n elif req == \"quest2\":\n return jsonify({\"rtrn\" : User.getQuestion(current_user.id, 2)})\n elif req == \"quest3\":\n return jsonify({\"rtrn\" : User.getQuestion(current_user.id, 3)})\n #if req arg is total use other args to get count for how many of that ans is in db\n elif req == \"total\":\n return jsonify({\"rtrn\" : User.questTotal(request.args.get('quest', 0),request.args.get('ans', 0))})\n else:\n return jsonify({\"rtrn\" : \"error\"})\n \n@app.route(\"/login\")\ndef login():\n # Find out what URL to hit for Google login\n google_provider_cfg = get_google_provider_cfg()\n authorization_endpoint = google_provider_cfg[\"authorization_endpoint\"]\n\n # Use library to construct the request for login and provide\n # scopes that let you retrieve user's profile from Google\n request_uri = client.prepare_request_uri(\n authorization_endpoint,\n redirect_uri=request.base_url + \"/callback\",\n scope=[\"openid\", \"email\", \"profile\"],\n )\n return redirect(request_uri)\n\n\n@app.route(\"/login/callback\")\ndef callback():\n # Get authorization code Google sent back to you\n code = request.args.get(\"code\")\n\n # Find out what URL to hit to get tokens that allow you to ask for\n # things on behalf of a user\n google_provider_cfg = get_google_provider_cfg()\n token_endpoint = google_provider_cfg[\"token_endpoint\"]\n\n # Prepare and send request to get tokens! Yay tokens!\n token_url, headers, body = client.prepare_token_request(\n token_endpoint,\n authorization_response=request.url,\n redirect_url=request.base_url,\n code=code,\n )\n token_response = requests.post(\n token_url,\n headers=headers,\n data=body,\n auth=(GOOGLE_CLIENT_ID, GOOGLE_CLIENT_SECRET),\n )\n\n # Parse the tokens!\n client.parse_request_body_response(json.dumps(token_response.json()))\n\n # Now that we have tokens (yay) let's find and hit URL\n # from Google that gives you user's profile information,\n # including their Google Profile Image and Email\n userinfo_endpoint = google_provider_cfg[\"userinfo_endpoint\"]\n uri, headers, body = client.add_token(userinfo_endpoint)\n userinfo_response = requests.get(uri, headers=headers, data=body)\n\n # We want to make sure their email is verified.\n # The user authenticated with Google, authorized our\n # app, and now we've verified their email through Google!\n if userinfo_response.json().get(\"email_verified\"):\n unique_id = userinfo_response.json()[\"sub\"]\n users_email = userinfo_response.json()[\"email\"]\n picture = userinfo_response.json()[\"picture\"]\n users_name = userinfo_response.json()[\"given_name\"]\n else:\n return \"User email not available or not verified by Google.\", 400\n\n # Create a user in our db with the information provided\n # by Google\n user = User(\n id_=unique_id, name=users_name, email=users_email, profile_pic=picture\n )\n\n # Doesn't exist? 
Add to database\n if not User.get(unique_id):\n User.create(unique_id, users_name, users_email, picture)\n\n # Begin user session by logging the user in\n login_user(user)\n\n # Send user back to homepage\n return redirect(url_for(\"index\"))\n\n\n@app.route(\"/logout\")\n@login_required\ndef logout():\n logout_user()\n return redirect(url_for(\"index\"))\n\n\ndef get_google_provider_cfg():\n return requests.get(GOOGLE_DISCOVERY_URL).json()\n\n\nif __name__ == \"__main__\":\n app.run(ssl_context=\"adhoc\")\n", "sub_path": "assignments/sprint-seven/my-google/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 6207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.environ.get", "line_number": 23, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.environ.get", "line_number": 24, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 30, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 31, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.urandom", "line_number": 31, "usage_type": "call"}, {"api_name": "flask_login.LoginManager", "line_number": 35, "usage_type": "call"}, {"api_name": "db.init_db_command", "line_number": 46, "usage_type": "call"}, {"api_name": "sqlite3.OperationalError", "line_number": 47, "usage_type": "attribute"}, {"api_name": "oauthlib.oauth2.WebApplicationClient", "line_number": 52, "usage_type": "call"}, {"api_name": "user.User.get", "line_number": 57, "usage_type": "call"}, {"api_name": "user.User", "line_number": 57, "usage_type": "name"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 62, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 65, "usage_type": "call"}, {"api_name": "flask_login.current_user.is_authenticated", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "user.User.setQuestions", "line_number": 83, "usage_type": "call"}, {"api_name": "user.User", "line_number": 83, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 83, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 83, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 84, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 84, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 84, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 88, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 90, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.request.args", 
"line_number": 95, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 98, "usage_type": "call"}, {"api_name": "user.User.getQuestion", "line_number": 98, "usage_type": "call"}, {"api_name": "user.User", "line_number": 98, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 98, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 100, "usage_type": "call"}, {"api_name": "user.User.getQuestion", "line_number": 100, "usage_type": "call"}, {"api_name": "user.User", "line_number": 100, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 100, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 100, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 102, "usage_type": "call"}, {"api_name": "user.User.getQuestion", "line_number": 102, "usage_type": "call"}, {"api_name": "user.User", "line_number": 102, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 102, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 102, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 105, "usage_type": "call"}, {"api_name": "user.User.questTotal", "line_number": 105, "usage_type": "call"}, {"api_name": "user.User", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 105, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 105, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.base_url", "line_number": 119, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 119, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 122, "usage_type": "call"}, {"api_name": "flask.request.args.get", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 128, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 128, "usage_type": "name"}, {"api_name": "flask.request.url", "line_number": 138, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 138, "usage_type": "name"}, {"api_name": "flask.request.base_url", "line_number": 139, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 139, "usage_type": "name"}, {"api_name": "requests.post", "line_number": 142, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 150, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 157, "usage_type": "call"}, {"api_name": "user.User", "line_number": 172, "usage_type": "call"}, {"api_name": "user.User.get", "line_number": 177, "usage_type": "call"}, {"api_name": "user.User", "line_number": 177, "usage_type": "name"}, {"api_name": "user.User.create", "line_number": 178, "usage_type": "call"}, {"api_name": "user.User", "line_number": 178, "usage_type": "name"}, {"api_name": "flask_login.login_user", "line_number": 181, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 184, "usage_type": "call"}, {"api_name": "flask_login.logout_user", "line_number": 190, "usage_type": "call"}, {"api_name": "flask.redirect", 
"line_number": 191, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 191, "usage_type": "call"}, {"api_name": "flask_login.login_required", "line_number": 188, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 195, "usage_type": "call"}]} +{"seq_id": "475485869", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import print_function, division\n\nfrom ase.units import kJ\nfrom sklearn.metrics import mean_squared_error\nfrom math import sqrt\n\n#rms = sqrt(mean_squared_error(y_actual, y_predicted))\nimport numpy as np\n\ntry:\n from scipy.optimize import curve_fit\nexcept ImportError:\n try:\n from scipy.optimize import leastsq\n\n # this part comes from\n # http://projects.scipy.org/scipy/browser/trunk/scipy/optimize/minpack.py\n def _general_function(params, xdata, ydata, function):\n return function(xdata, *params) - ydata\n # end of this part\n\n def curve_fit(f, x, y, p0):\n func = _general_function\n args = (x, y, f)\n # this part comes from\n # http://projects.scipy.org/scipy/browser/trunk/scipy/optimize/minpack.py\n popt, pcov, infodict, mesg, ier = leastsq(func, p0, args=args,\n full_output=1)\n\n if ier not in [1, 2, 3, 4]:\n raise RuntimeError(\"Optimal parameters not found: \" + mesg)\n # end of this part\n return popt, pcov\n except ImportError:\n curve_fit = None\n\n\neos_names = ['sj', 'taylor', 'murnaghan', 'birch', 'birchmurnaghan',\n 'pouriertarantola', 'vinet', 'antonschmidt', 'p3']\n\n\ndef taylor(V, E0, beta, alpha, V0):\n 'Taylor Expansion up to 3rd order about V0'\n\n E = E0 + beta / 2 * (V - V0)**2 / V0 + alpha / 6 * (V - V0)**3 / V0\n return E\n\n\ndef murnaghan(V, E0, B0, BP, V0):\n 'From PRB 28,5480 (1983'\n\n E = E0 + B0 * V / BP * (((V0 / V)**BP) / (BP - 1) + 1) - V0 * B0 / (BP - 1)\n return E\n\n\ndef birch(V, E0, B0, BP, V0):\n \"\"\"\n From Intermetallic compounds: Principles and Practice, Vol. I: Principles\n Chapter 9 pages 195-210 by M. Mehl. B. Klein, D. Papaconstantopoulos\n paper downloaded from Web\n\n case where n=0\n \"\"\"\n\n E = (E0 +\n 9 / 8 * B0 * V0 * ((V0 / V)**(2 / 3) - 1)**2 +\n 9 / 16 * B0 * V0 * (BP - 4) * ((V0 / V)**(2 / 3) - 1)**3)\n return E\n\n\ndef birchmurnaghan(V, E0, B0, BP, V0):\n \"\"\"\n BirchMurnaghan equation from PRB 70, 224107\n Eq. (3) in the paper. Note that there's a typo in the paper and it uses\n inversed expression for eta.\n \"\"\"\n\n eta = (V0 / V)**(1 / 3)\n E = E0 + 9 * B0 * V0 / 16 * (eta**2 - 1)**2 * (\n 6 + BP * (eta**2 - 1) - 4 * eta**2)\n return E\n\n\ndef check_birchmurnaghan():\n from sympy import symbols, Rational, diff, simplify\n v, b, bp, v0 = symbols('v b bp v0')\n x = (v0 / v)**Rational(2, 3)\n e = 9 * b * v0 * (x - 1)**2 * (6 + bp * (x - 1) - 4 * x) / 16\n print(e)\n B = diff(e, v, 2) * v\n BP = -v * diff(B, v) / b\n print(simplify(B.subs(v, v0)))\n print(simplify(BP.subs(v, v0)))\n\n\ndef pouriertarantola(V, E0, B0, BP, V0):\n 'Pourier-Tarantola equation from PRB 70, 224107'\n\n eta = (V / V0)**(1 / 3)\n squiggle = -3 * np.log(eta)\n\n E = E0 + B0 * V0 * squiggle**2 / 6 * (3 + squiggle * (BP - 2))\n return E\n\n\ndef vinet(V, E0, B0, BP, V0):\n 'Vinet equation from PRB 70, 224107'\n\n eta = (V / V0)**(1 / 3)\n\n E = (E0 + 2 * B0 * V0 / (BP - 1)**2 *\n (2 - (5 + 3 * BP * (eta - 1) - 3 * eta) *\n np.exp(-3 * (BP - 1) * (eta - 1) / 2)))\n return E\n\n\ndef antonschmidt(V, Einf, B, n, V0):\n \"\"\"From Intermetallics 11, 23-32 (2003)\n\n Einf should be E_infinity, i.e. 
infinite separation, but\n    according to the paper it does not provide a good estimate\n    of the cohesive energy.  They derive this equation from an\n    empirical formula for the volume dependence of pressure,\n\n    E(vol) = E_inf + int(P dV) from V=vol to V=infinity\n\n    but the equation breaks down at large volumes, so E_inf\n    is not that meaningful\n\n    n should be about -2 according to the paper.\n\n    I find this equation does not fit volumetric data as well\n    as the other equations do.\n    \"\"\"\n\n    E = B * V0 / (n + 1) * (V / V0)**(n + 1) * (np.log(V / V0) -\n                                                (1 / (n + 1))) + Einf\n    return E\n\n\ndef p3(V, c0, c1, c2, c3):\n    'polynomial fit'\n\n    E = c0 + c1 * V + c2 * V**2 + c3 * V**3\n    return E\n\n\ndef parabola(x, a, b, c):\n    \"\"\"parabola polynomial function\n\n    this function is used to fit the data to get good guesses for\n    the equation of state fits\n\n    a 4th order polynomial fit to get good guesses\n    was not a good idea because for noisy data the fit is too wiggly;\n    2nd order seems to be sufficient, and guarantees a single minimum\"\"\"\n\n    return a + b * x + c * x**2\n\n\nclass EquationOfState:\n    \"\"\"Fit equation of state for bulk systems.\n\n    One of the following equations is used::\n\n       sjeos (default)\n           A third order inverse polynomial fit 10.1103/PhysRevB.67.026103\n\n                           2      3        -1/3\n           E(V) = c + c t + c t +  c t ,  t = V\n                   0   1     2      3\n\n       taylor\n           A third order Taylor series expansion about the minimum volume\n\n       murnaghan\n           PRB 28, 5480 (1983)\n\n       birch\n           Intermetallic compounds: Principles and Practice,\n           Vol I: Principles. pages 195-210\n\n       birchmurnaghan\n           PRB 70, 224107\n\n       pouriertarantola\n           PRB 70, 224107\n\n       vinet\n           PRB 70, 224107\n\n       antonschmidt\n           Intermetallics 11, 23-32 (2003)\n\n       p3\n           A third order polynomial fit\n\n    Use::\n\n       eos = EquationOfState(volumes, energies, eos='murnaghan')\n       v0, e0, B = eos.fit()\n       eos.plot()\n\n    \"\"\"\n    def __init__(self, volumes, energies, eos='sj'):\n        self.v = np.array(volumes)\n        self.e = np.array(energies)\n\n        if eos == 'sjeos':\n            eos = 'sj'\n        self.eos_string = eos\n        self.v0 = None\n\n    def fit(self):\n        \"\"\"Calculate volume, energy, and bulk modulus.\n\n        Returns the optimal volume, the minimum energy, and the bulk\n        modulus. Notice that the ASE units for the bulk modulus is\n        eV/Angstrom^3 - to get the value in GPa, do this::\n\n            v0, e0, B = eos.fit()\n            print(B / kJ * 1.0e24, 'GPa')\n\n        \"\"\"\n\n        if self.eos_string == 'sj':\n            return self.fit_sjeos()\n\n        self.func = globals()[self.eos_string]\n\n        p0 = [min(self.e), 1, 1]\n        popt, pcov = curve_fit(parabola, self.v, self.e, p0)\n\n        parabola_parameters = popt\n        # Here I just make sure the minimum is bracketed by the volumes;\n        # this is for the solver\n        minvol = min(self.v)\n        maxvol = max(self.v)\n\n        # the minimum of the parabola is at dE/dV = 0, or 2 * c * V + b = 0\n        c = parabola_parameters[2]\n        b = parabola_parameters[1]\n        a = parabola_parameters[0]\n        parabola_vmin = -b / 2 / c\n\n        catch_warn = False\n\n        if not (minvol < parabola_vmin and parabola_vmin < maxvol):\n            catch_warn = True\n            print('Warning: the minimum volume of the fitted parabola is not in '\n                  'your volumes. You may not have a minimum in your dataset')\n\n        # evaluate the parabola at the minimum to estimate the groundstate\n        # energy\n        E0 = parabola(parabola_vmin, a, b, c)\n        # estimate the bulk modulus from Vo * E''. 
E'' = 2 * c\n B0 = 2 * c * parabola_vmin\n\n if self.eos_string == 'antonschmidt':\n BP = -2\n else:\n BP = 4\n\n initial_guess = [E0, B0, BP, parabola_vmin]\n\n # now fit the equation of state\n\n if not catch_warn:\n p0 = initial_guess\n popt, pcov = curve_fit(self.func, self.v, self.e, p0)\n\n self.eos_parameters = popt\n self.eos_errors = np.sqrt(np.diag(pcov))\n\n if self.eos_string == 'p3':\n c0, c1, c2, c3 = self.eos_parameters\n # find minimum E in E = c0 + c1 * V + c2 * V**2 + c3 * V**3\n # dE/dV = c1+ 2 * c2 * V + 3 * c3 * V**2 = 0\n # solve by quadratic formula with the positive root\n\n a = 3 * c3\n b = 2 * c2\n c = c1\n\n self.v0 = (-b + np.sqrt(b**2 - 4 * a * c)) / (2 * a)\n self.e0 = p3(self.v0, c0, c1, c2, c3)\n self.B = (2 * c2 + 6 * c3 * self.v0) * self.v0\n else:\n self.v0 = self.eos_parameters[3]\n self.v0_err = self.eos_errors[3]\n self.e0 = self.eos_parameters[0]\n self.e0_err = self.eos_errors[0]\n self.B = self.eos_parameters[1]\n self.B_err = self.eos_errors[1]\n self.BP = self.eos_parameters[2]\n self.BP_errors = self.eos_errors[2]\n # print (self.func(self.v,self.eos_parameters[0],self.eos_parameters[1],self.eos_parameters[2],self.eos_parameters[3]))\n\n return self.e0, self.e0_err, self.v0, self.v0_err,\\\n self.B, self.B_err, self.BP , self.BP_errors, self.func(self.v,self.eos_parameters[0],self.eos_parameters[1],self.eos_parameters[2],self.eos_parameters[3])\n else:\n return 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn', 'Warn'\n\n def plot(self, filename=None, show=None, ax=None):\n \"\"\"Plot fitted energy curve.\n\n Uses Matplotlib to plot the energy curve. Use *show=True* to\n show the figure and *filename='abc.png'* or\n *filename='abc.eps'* to save the figure to a file.\"\"\"\n\n import matplotlib.pyplot as plt\n\n if self.v0 is None:\n self.fit()\n\n if filename is None and show is None:\n show = True\n\n if ax is None:\n ax = plt.gca()\n\n x = np.linspace(min(self.v), max(self.v), 100)\n if self.eos_string == 'sj':\n y = self.fit0(x**-(1 / 3))\n else:\n y = self.func(x, *self.eos_parameters)\n\n ax.plot(x, y, '-r')\n ax.plot(self.v, self.e, 'o')\n\n try:\n ax.set_xlabel(u'volume [Å$^3$]')\n ax.set_ylabel(u'energy [eV]')\n ax.set_title(u'%s: E: %.3f eV, V: %.3f Å$^3$, B: %.3f GPa' %\n (self.eos_string, self.e0, self.v0,\n self.B / kJ * 1.e24))\n\n except ImportError: # XXX what would cause this error? LaTeX?\n ax.set_xlabel(u'volume [L(length)^3]')\n ax.set_ylabel(u'energy [E(energy)]')\n ax.set_title(u'%s: E: %.3f E, V: %.3f L^3, B: %.3e E/L^3' %\n (self.eos_string, self.e0, self.v0, self.B))\n\n if show:\n plt.show()\n if filename is not None:\n fig = ax.get_figure()\n fig.savefig(filename)\n\n return ax\n\n def fit_sjeos(self):\n \"\"\"Calculate volume, energy, and bulk modulus.\n\n Returns the optimal volume, the minimum energy, and the bulk\n modulus. 
Notice that the ASE units for the bulk modulus is\n eV/Angstrom^3 - to get the value in GPa, do this::\n\n v0, e0, B = eos.fit()\n print(B / kJ * 1.0e24, 'GPa')\n\n \"\"\"\n\n fit0 = np.poly1d(np.polyfit(self.v**-(1 / 3), self.e, 3))\n fit1 = np.polyder(fit0, 1)\n fit2 = np.polyder(fit1, 1)\n\n self.v0 = None\n for t in np.roots(fit1):\n if isinstance(t, float) and t > 0 and fit2(t) > 0:\n self.v0 = t**-3\n break\n\n if self.v0 is None:\n raise ValueError('No minimum!')\n\n self.e0 = fit0(t)\n self.B = t**5 * fit2(t) / 9\n self.fit0 = fit0\n\n return self.v0, self.e0, self.B\n\n\ndef main():\n import optparse\n from ase.io import read\n parser = optparse.OptionParser(usage='python -m ase.eos [options] '\n 'trajectory, ...',\n description='Calculate equation of state.')\n parser.add_option('-p', '--plot', action='store_true')\n parser.add_option('-t', '--type', default='sj')\n opts, args = parser.parse_args()\n if not opts.plot:\n print('# filename '\n 'points volume energy bulk modulus')\n print('# '\n ' [Ang^3] [eV] [GPa]')\n for name in args:\n if name == '-':\n # Special case - used by ase-gui:\n import pickle\n import sys\n if sys.version_info[0] == 2:\n v, e = pickle.load(sys.stdin)\n else:\n v, e = pickle.load(sys.stdin.buffer)\n else:\n if '@' in name:\n index = None\n else:\n index = ':'\n images = read(name, index=index)\n v = [atoms.get_volume() for atoms in images]\n e = [atoms.get_potential_energy() for atoms in images]\n eos = EquationOfState(v, e, opts.type)\n if opts.plot:\n eos.plot()\n else:\n try:\n v0, e0, B = eos.fit()\n except ValueError as ex:\n print('{0:30}{1:2} {2}'.format(name, len(v), ex.message))\n else:\n print('{0:30}{1:2} {2:10.3f}{3:10.3f}{4:14.3f}'\n .format(name, len(v), v0, e0, B / kJ * 1.0e24))\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "CompleteApp/eos.py", "file_name": "eos.py", "file_ext": "py", "file_size_in_byte": 13301, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scipy.optimize.leastsq", "line_number": 28, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 36, "usage_type": "name"}, {"api_name": "sympy.symbols", "line_number": 87, "usage_type": "call"}, {"api_name": "sympy.Rational", "line_number": 88, "usage_type": "call"}, {"api_name": "sympy.diff", "line_number": 91, "usage_type": "call"}, {"api_name": "sympy.diff", "line_number": 92, "usage_type": "call"}, {"api_name": "sympy.simplify", "line_number": 93, "usage_type": "call"}, {"api_name": "sympy.simplify", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 210, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 235, "usage_type": "call"}, {"api_name": "scipy.optimize.curve_fit", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.diag", "line_number": 276, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 288, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gca", "line_number": 323, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 323, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 325, 
"usage_type": "call"}, {"api_name": "ase.units.kJ", "line_number": 339, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 348, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 348, "usage_type": "name"}, {"api_name": "numpy.poly1d", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.polyfit", "line_number": 367, "usage_type": "call"}, {"api_name": "numpy.polyder", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.polyder", "line_number": 369, "usage_type": "call"}, {"api_name": "numpy.roots", "line_number": 372, "usage_type": "call"}, {"api_name": "optparse.OptionParser", "line_number": 390, "usage_type": "call"}, {"api_name": "sys.version_info", "line_number": 406, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 407, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 407, "usage_type": "attribute"}, {"api_name": "pickle.load", "line_number": 409, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 409, "usage_type": "attribute"}, {"api_name": "ase.io.read", "line_number": 415, "usage_type": "call"}, {"api_name": "{'plt': 'matplotlib.pyplot'}", "line_number": 418, "usage_type": "call"}, {"api_name": "ase.units.kJ", "line_number": 428, "usage_type": "name"}]} +{"seq_id": "561608480", "text": "\"\"\"empty message\n\nRevision ID: 364a17315243\nRevises: 4c380ffb6dab\nCreate Date: 2015-09-05 20:45:08.212000\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '364a17315243'\ndown_revision = '4c380ffb6dab'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('adopt')\n op.drop_table('shelter')\n op.drop_table('puppy')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('puppy',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('name', sa.VARCHAR(length=250), nullable=False),\n sa.Column('gender', sa.VARCHAR(length=16), nullable=False),\n sa.Column('dateOfBirth', sa.DATE(), nullable=True),\n sa.Column('picture', sa.VARCHAR(), nullable=True),\n sa.Column('shelter_id', sa.INTEGER(), nullable=True),\n sa.Column('weight', sa.NUMERIC(precision=10), nullable=True),\n sa.ForeignKeyConstraint(['shelter_id'], [u'shelter.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('shelter',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('name', sa.VARCHAR(length=80), nullable=False),\n sa.Column('address', sa.VARCHAR(length=250), nullable=True),\n sa.Column('city', sa.VARCHAR(length=80), nullable=True),\n sa.Column('state', sa.VARCHAR(length=40), nullable=True),\n sa.Column('zipCode', sa.VARCHAR(length=10), nullable=True),\n sa.Column('website', sa.VARCHAR(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('adopt',\n sa.Column('id', sa.INTEGER(), nullable=False),\n sa.Column('name', sa.VARCHAR(length=250), nullable=False),\n sa.PrimaryKeyConstraint('id')\n )\n ### end Alembic commands ###\n", "sub_path": "problem set 2/puppies-backend/migrations/versions/364a17315243_.py", "file_name": "364a17315243_.py", "file_ext": "py", "file_size_in_byte": 1791, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "alembic.op.drop_table", "line_number": 19, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 19, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 20, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 20, "usage_type": "name"}, {"api_name": "alembic.op.drop_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 28, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 29, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.DATE", "line_number": 31, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 32, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 33, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.NUMERIC", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.ForeignKeyConstraint", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 36, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 38, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 38, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 39, "usage_type": "call"}, 
{"api_name": "sqlalchemy.INTEGER", "line_number": 39, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 40, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 41, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 42, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 43, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 44, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 45, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op.create_table", "line_number": 48, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 48, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.INTEGER", "line_number": 49, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 50, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "514093621", "text": "import sys\nimport numpy as np\nfrom collections import defaultdict\n\n\ninput_text = map(str.strip, open('06.txt').readlines())\n\ncache = {}\nfor i, line in enumerate(input_text):\n cache[i + 1] = map(int, line.split(','))\n\n\ndef find_min_point_and_distance(x, y):\n min_dist = sys.maxint\n distance = 0\n min_i = []\n for i, coords in cache.items():\n distance += abs(x - coords[0]) + abs(y - coords[1])\n if min_dist > abs(x - coords[0]) + abs(y - coords[1]):\n min_dist = abs(x - coords[0]) + abs(y - coords[1])\n min_i = [i]\n elif min_dist == abs(x - coords[0]) + abs(y - coords[1]):\n min_i.append(i)\n if len(min_i) == 1:\n return min_i[0], distance\n else:\n return -1, distance\n\n\nmax_x = max(x[0] for x in cache.values()) + 1\nmax_y = max(x[1] for x in cache.values()) + 1\n\nm = np.zeros((max_x, max_y))\nmmm = np.zeros((max_x, max_y))\nfor x in range(max_x):\n for y in range(max_y):\n m[x, y], mmm[x, y] = find_min_point_and_distance(x, y)\n\n\ninfinites = set(list(m[0, :]) + list(m[-1, :]) + list(m[:, 0]) + list(m[:, -1]))\n\ntotal = defaultdict(int)\nfor x in range(max_x):\n for y in range(max_y):\n if m[x, y] not in infinites:\n total[m[x, y]] += 1\n\n# part 1\nprint(max(total.values()))\n\n# part 2\nprint((mmm < 10000).sum())\n", "sub_path": "day06.py", "file_name": "day06.py", "file_ext": "py", "file_size_in_byte": 1324, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.maxint", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "110939411", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n読み出しデータの分散を見る。\nCreated on Fri Aug 11 01:21:23 
2017\n\n@author: NV\n\"\"\"\nfileNo = [ '0012']\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nimport sys\nsys.path.append(os.pardir)\nimport modu_htn as htn\nimport glob\nplt.close('all')\nhtn.setFigParam()\n###############################################################\npulseNumDict ={'xy16N':16, 'xy16':16, 'xy8':8, 'spinecho': 1}# number of pi pulse\nNList =[]\n\ndef calcData(data0, data0ref, data1, data1ref):\n    \"\"\"\n    Change this depending on the type of analysis.\n    \"\"\"\n#    return data0/data0ref - data1/data1ref\n    return data0 - data0ref - ( data1 - data1ref) # method from the subpico paper\n\ndef anaSTD(fileNos, start_indx=0):\n    \"\"\"\n    Calculate the variance of each data set.\n    \"\"\"\n    stds = []\n    for fileNo in fileNos:\n        npzfile = np.load(glob.glob(fileNo +'*.npz')[0])\n        \n        data0, data1, data0ref, data1ref = htn.get_data01_01ref(npzfile)\n        data = calcData(data0, data0ref, data1, data1ref)\n        #config = npzfile['config'].item()\n        #time = npzfile['xdata']\n        \n        stds.append(np.std(data[start_indx:]))\n        \n        ### pulse num ###\n        config = npzfile['config'].item()\n        if config['method'] == 'spinecho':\n            N= 2*pulseNumDict[config['method']]# each pi pulse had tau-pi-tau\n        else:\n            N = 2*config['Nconst']*pulseNumDict[config['method']]#total pulse \n        NList.append(N) \n\n    return NList, stds\n\n\n\n\n\nNList, stds = anaSTD(fileNo, start_indx=2)\nprint(\"std\",stds)\nif len(fileNo)==1:\n    npzfile = np.load(glob.glob(fileNo[0] +'*.npz')[0])\n    data0, data1, data0ref, data1ref = htn.get_data01_01ref(npzfile)\n    data = calcData(data0, data0ref, data1, data1ref)\n    plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n    plt.plot(data, 'bo-')\n    plt.xlabel('Data index')\n    plt.ylabel('Voltage (V)')\n    plt.tight_layout() \n    plt.savefig(fileNo[0] +\"_stability.png\",dpi=300)\n    \nelif len(fileNo)>1:\n    plt.figure(figsize = htn.calcFigSize(column =1, h_ratio= 0.8))\n    #plt.title('pi pulse fix')\n    plt.plot(NList, stds, 'bo-')\n    #plt.xscale(\"log\")\n    plt.yscale(\"log\")\n    plt.xlabel('Pulse number')\n    plt.tight_layout()\n    #plt.savefig(\"52-58_stability_echo_XY16.png\",dpi=300)\n", "sub_path": "AnaTools/ana_npz_data_std.py", "file_name": "ana_npz_data_std.py", "file_ext": "py", "file_size_in_byte": 2320, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.pardir", "line_number": 14, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.close", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 17, "usage_type": "name"}, {"api_name": "modu_htn.setFigParam", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 36, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 36, "usage_type": "call"}, {"api_name": "modu_htn.get_data01_01ref", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 62, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 62, "usage_type": "call"}, {"api_name": "modu_htn.get_data01_01ref", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "modu_htn.calcFigSize", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 66,
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 73, "usage_type": "name"}, {"api_name": "modu_htn.calcFigSize", "line_number": 73, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 75, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yscale", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}]} +{"seq_id": "324384316", "text": "import requests\nfrom bs4 import BeautifulSoup\nimport bs4\nimport time\nimport sys\n\nsys.path.append(\"你的模块路径\")\n\n\ndef load_words():\n with open('1.txt') as word_file:\n valid_words = set(word_file.read().split())\n\n return valid_words\n\n\ndef get_Colins_Soup(word):\n global startingPoint\n kv = {\n # type 'about:version' in the browser, pretending to be a real browser\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/81.0.4044.129 Safari/537.36 '\n }\n url = 'http://www.iciba.com/' + word\n try:\n r = requests.get(url, headers=kv)\n r.raise_for_status()\n r.encoding = r.apparent_encoding\n soup = BeautifulSoup(r.text, \"html.parser\")\n return soup.div.find(attrs={\"class\": \"collins-section\"})\n except:\n print(\"404!\")\n time.sleep(2)\n # startingPoint = int(input(\"where to start?\"))\n startingPoint = latest\n main()\n\n\n\ndef get_number_of_M(soup):\n count = 0\n for i in soup.find_all(attrs={\"class\": \"prep-order-icon\"}):\n if isinstance(i, bs4.element.Tag):\n if i.string != '':\n count += 1\n return count\n\n\ndef get_family_english(soup):\n global startingPoint\n list = []\n alist = []\n flist = []\n for a in range(65, 92):\n alist += chr(a)\n alist = tuple(alist)\n try:\n for i in soup.find_all(attrs={\"class\": 'family-english'}):\n if i is not None:\n list += i\n strrr = ''\n except:\n print(\"Sorry, I cannot find the meaning of this word.\")\n with open('2.txt', 'a+', encoding='utf-8') as f:\n f.write(\"%d\" % startingPoint + '\\r\\n')\n f.write('Sorry, I cannot find this word \\r\\n')\n f.close()\n print(\"404!\")\n time.sleep(2)\n startingPoint = latest + 1\n main()\n\n for sen in list:\n ss = str(sen)\n if ss.endswith(alist):\n strrr += sen + ' '\n flist = strrr.split(' ')\n flist.pop()\n return flist\n\n\ndef get_family_english_meaning(soup):\n list = []\n for i in 
soup.find_all(attrs={\"class\": 'family-english size-english prep-en'}):\n if i != None:\n list += i\n return list\n\n\ndef get_family_chinese(soup):\n list = []\n alist = []\n flist = []\n for i in soup.find_all(attrs={\"class\": 'family-chinese size-chinese'}):\n list += i\n for j in soup.find_all(attrs={\"class\": 'family-chinese'}):\n alist += j\n for a in alist:\n if a not in list:\n flist.append(a)\n # avoid add the Chinese meaning of the sentences, but add the word meanings\n return flist\n\n\ndef main():\n global startingPoint\n global latest\n count = startingPoint - 1\n for word in words[startingPoint - 1:]:\n count = count + 1\n print(count)\n\n latest = count\n\n # time.sleep(1)\n\n listA = []\n listB = []\n listC = []\n soup = get_Colins_Soup(word)\n # n = get_number_of_M(soup)\n\n listA = get_family_english(soup)\n n = len(listA)\n listB = get_family_english_meaning(soup)\n listC = get_family_chinese(soup)\n meaning = []\n if listA != []:\n for i in range(n):\n try:\n print(listA[i])\n print(listB[i])\n print(listC[i])\n print()\n with open('2.txt', 'a+', encoding='utf-8') as f:\n if i == 0:\n f.write(\"%d\" % count + '\\r\\n')\n f.write(listA[i] + '\\r\\n')\n f.write(listB[i] + '\\r\\n')\n f.write(listC[i] + '\\r\\n')\n f.close()\n except:\n print(\"Sorry, I cannot find the meaning of this word.\")\n with open('2.txt', 'a+', encoding='utf-8') as f:\n f.write(\"%d\" % count + '\\r\\n')\n f.write('Sorry, I cannot find this word \\r\\n')\n f.close()\n else:\n print(\"Sorry, I cannot find the meaning of this word.\")\n with open('2.txt', 'a+', encoding='utf-8') as f:\n f.write(\"%d\" % count + '\\r\\n')\n f.write('Sorry, I cannot find this word \\r\\n')\n f.close()\n\n\nglobal startingPoint\nglobal latest\nwords = []\nfor line in open('popular.txt', 'r'):\n rs = line.replace('\\n', '')\n words.append(rs)\nprint(len(words))\nstartingPoint = int(input(\"where to start?\"))\ntry:\n main()\nexcept:\n print(\"404!\")\n time.sleep(2)\n # startingPoint = int(input(\"where to start?\"))\n startingPoint = latest\n main()\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 26, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 29, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 33, "usage_type": "call"}, {"api_name": "bs4.element", "line_number": 43, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}]} +{"seq_id": "221548916", "text": "#-*-coding:utf-8-*-\nimport time\nimport datetime\nimport pyaudio\nimport wave\n#import spidev\nimport RPi.GPIO as GPIO\nimport os\n#import threading\n#import warnings\nfrom multiprocessing import Queue\nfrom multiprocessing import Process,Value\n#import numpy as np\n\n \n \n#def FILEW(q, r):\ndef FILEW(q): \n \n wf = wave.open(WAVE_FILE, 'wb')\n wf.setnchannels(CHANNELS)\n wf.setsampwidth(audio.get_sample_size(FORMAT))\n wf.setframerate(RATE)\n \n while flg.value:\n \n time.sleep(fw_inv)\n \n while not q.empty():\n wf.writeframes(q.get())\n \n wf.close() \n #fp.close()\n print(\"END FILEW\")\n\n\ndef callbackMIC(in_data, frame_count, time_info, status):\n global q \n\n 
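# pyaudio invokes this callback on an internal thread once per CHUNK of\n    # frames; queueing the raw buffer keeps audio capture decoupled from the\n    # disk writes performed by the FILEW process.\n    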
q.put(in_data)\n \n return(None, pyaudio.paContinue)\n\n \n\n \n# =============================================================================\n# PARAM SETTING\n# =============================================================================\nUSB_saved_EN = 0\nMeas_fixed_time = 0\nusb_path = \"/media/pi/8133-A825/data\"\nfw_inv = 1\n\n\n# =============================================================================\n# pyaudio\n# =============================================================================\nFORMAT = pyaudio.paInt32\nCHANNELS = 2\nRATE = 32000#32000#\nCHUNK = 2048\nRECORD_SECONDS = 185\naudio = pyaudio.PyAudio()\n\n\n# =============================================================================\n# GPIO setting\n# =============================================================================\n#P_INT = 4\nP_SW1 = 17\n#P_SW2 = 13\n#P_LED = 17\n\nGPIO.setmode(GPIO.BCM)\n#GPIO.setup(P_INT, GPIO.IN, GPIO.PUD_UP) # MPU920 INTpin\nGPIO.setup(P_SW1, GPIO.IN, GPIO.PUD_UP) # extSW1\n#GPIO.setup(P_SW2, GPIO.IN, GPIO.PUD_UP) # extSW2 for shutdown\n#GPIO.setup(P_LED, GPIO.OUT) # LED\n\n\ntime.sleep(0.1)\n\n\n# =============================================================================\n# MAKE MAIN DIR\n# =============================================================================\nday_dir = str( datetime.datetime.now().strftime(\"%Y%m%d\") )\n\nif USB_saved_EN == 1:\n pass\nelse:\n homepath = \"/home/pi/\"\n if not os.path.isdir( homepath + day_dir ):\n os.mkdir( homepath + day_dir )\n\n\n# =============================================================================\n# Queue\n# =============================================================================\nq = Queue()\nflg = Value('d', 1.0)\n\n#################\n# Meas loop #\n#################\nif __name__ == \"__main__\":\n \n \n print(\"waiting...\")\n \n \n #########################################################\n GPIO.wait_for_edge(P_SW1, GPIO.FALLING, bouncetime=500)\n #########################################################\n \n\n ############## MAKE SUB DIR #############\n time_now_dir = datetime.datetime.now().strftime('%H%M%S')\n day_dir_ = day_dir +\"/\"+ time_now_dir\n \n if USB_saved_EN == 1:\n pass\n else:\n os.mkdir( homepath + day_dir_ )\n \n ############## MAKE FILE ################\n print(\"File name:%s\"%(time_now_dir))\n print(\"USB_saved_EN:%d\"%(USB_saved_EN))\n if Meas_fixed_time ==1:\n print(\"Meas_fixed_time:%d\"%(Meas_fixed_time))\n print(\"RECORD_SECONDS %f\"%(RECORD_SECONDS))\n else:\n print(\"Start/Stop Mode\")\n print(\"CHUNK %d\"%(CHUNK))\n print(\"Recording...\")\n print(datetime.datetime.now())\n if USB_saved_EN == 1:\n WAVE_FILE = usb_path +\"/\"+ \"%s_mic.wav\"%(time_now_dir)\n #ACC_FILE = usb_path +\"/\"+ \"%s_acc.csv\"%(time_now_dir)\n \n else:\n #ACC_FILE = \"%s_acc.csv\"%(time_now_dir)\n WAVE_FILE = homepath + day_dir_ +\"/\"+ \"%s_mic.wav\"%(time_now_dir)\n #ACC_FILE = homepath + day_dir_ +\"/\"+ \"%s_acc.csv\"%(time_now_dir) \n \n\n stream = audio.open(\n format=FORMAT,\n channels=CHANNELS,\n rate=RATE,\n input=True,\n input_device_index = 0,\n frames_per_buffer=CHUNK,\n stream_callback=callbackMIC\n )\n \n\n p = Process( target = FILEW, args=(q,) )\n p.start()\n \n stream.start_stream()\n \n\n if Meas_fixed_time == 1:\n ##########################\n time.sleep(RECORD_SECONDS)\n ##########################\n\n else:\n #######################################################\n GPIO.wait_for_edge(P_SW1, GPIO.FALLING, bouncetime=500)\n #######################################################\n \n 
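# Clearing the shared flag ends the FILEW write loop; its final pass drains\n    # the queue and closes the wav file, and p.join() below waits for that.\n    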
\n flg.value = 0.0 # write file STOP(subp)\n p.join()\n \n stream.stop_stream()\n stream.close()\n\n GPIO.cleanup()\n audio.terminate()\n ###spi.close()\n\n \n os.system(\"sudo bash /home/pi/reset.sh\") # echo 3 > /proc/sys/vm/drop_caches\n print(\"END\")\n", "sub_path": "mic_stereo.py", "file_name": "mic_stereo.py", "file_ext": "py", "file_size_in_byte": 4713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "wave.open", "line_number": 20, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 27, "usage_type": "call"}, {"api_name": "pyaudio.paContinue", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pyaudio.paInt32", "line_number": 59, "usage_type": "attribute"}, {"api_name": "pyaudio.PyAudio", "line_number": 64, "usage_type": "call"}, {"api_name": "RPi.GPIO.setmode", "line_number": 75, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 75, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 75, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setup", "line_number": 77, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 77, "usage_type": "name"}, {"api_name": "RPi.GPIO.IN", "line_number": 77, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.PUD_UP", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 88, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 95, "usage_type": "call"}, {"api_name": "multiprocessing.Queue", "line_number": 101, "usage_type": "call"}, {"api_name": "multiprocessing.Value", "line_number": 102, "usage_type": "call"}, {"api_name": "RPi.GPIO.wait_for_edge", "line_number": 114, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 114, "usage_type": "name"}, {"api_name": "RPi.GPIO.FALLING", "line_number": 114, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 119, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 125, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 137, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 137, "usage_type": "attribute"}, {"api_name": "multiprocessing.Process", "line_number": 159, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 167, "usage_type": "call"}, {"api_name": "RPi.GPIO.wait_for_edge", "line_number": 172, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 172, "usage_type": "name"}, {"api_name": "RPi.GPIO.FALLING", "line_number": 172, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 182, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 182, "usage_type": "name"}, {"api_name": "os.system", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "463488942", "text": "import pika\nimport json\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\n# RabbitMQ settings\nMQ_AV_CHECK_EXCHANGE = 'execute_check'\nCONN_PARAMS = pika.URLParameters('amqp://admin:admin@localhost:5672/%2F')\n\n\ndef send_check_message(message):\n json_message = json.dumps(message)\n \n MQ_CONNECTION = 
pika.BlockingConnection(CONN_PARAMS)\n MQ_CHANNEL = MQ_CONNECTION.channel()\n\n MQ_CHANNEL.exchange_declare(exchange=MQ_AV_CHECK_EXCHANGE, type='fanout')\n MQ_CHANNEL.basic_publish(exchange=MQ_AV_CHECK_EXCHANGE,\n routing_key='',\n body=json_message)\n\n logger.info('Sent to MQ: %s' % json_message)\n MQ_CHANNEL.close()\n MQ_CONNECTION.close()\n", "sub_path": "execute_check/task_scheduler.py", "file_name": "task_scheduler.py", "file_ext": "py", "file_size_in_byte": 720, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 5, "usage_type": "call"}, {"api_name": "pika.URLParameters", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 14, "usage_type": "call"}, {"api_name": "pika.BlockingConnection", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "243891663", "text": "import glob\nimport random\nimport os\n\nfrom torch.utils.data import Dataset\nfrom skimage import io, color\nfrom skimage.transform import rescale, resize, downscale_local_mean\nimport random\nimport numpy as np\nimport torch\n\nclass ImageDataset(Dataset):\n def __init__(self, root, unaligned=False, mode='train'):\n self.unaligned = unaligned\n\n self.files_A = sorted(glob.glob(os.path.join(root, '%s/train_A' % mode) + '/*.*'))\n self.files_B = sorted(glob.glob(os.path.join(root, '%s/train_C_fixed_official' % mode) + '/*.*'))\n\n def __getitem__(self, index):\n i = random.randint(0, 48)\n j = random.randint(0, 48)\n k=random.randint(0,100)\n \n item_A=color.rgb2lab(io.imread(self.files_A[index % len(self.files_A)]))\n item_A=resize(item_A,(448,448,3))\n item_A=item_A[i:i+400,j:j+400,:]\n if k>50:\n item_A=np.fliplr(item_A)\n item_A[:,:,0]=np.asarray(item_A[:,:,0])/50.0-1.0\n item_A[:,:,1:]=2.0*(np.asarray(item_A[:,:,1:])+128.0)/255.0-1.0\n item_A=torch.from_numpy(item_A.copy()).float()\n item_A=item_A.view(400,400,3)\n item_A_l=item_A[:,:,0]\n item_A_l=item_A_l.view(400,400,1)\n item_A_l=item_A_l.transpose(0, 1).transpose(0, 2).contiguous()\n item_A=item_A.transpose(0, 1).transpose(0, 2).contiguous()\n if self.unaligned:\n item_B = color.rgb2lab(io.imread(self.files_B[random.randint(0, len(self.files_B) - 1)]))\n item_B=resize(item_B,(448,448,3))\n item_B=item_B[i:i+400,j:j+400,:]\n if k>50:\n item_B=np.fliplr(item_B)\n item_B[:,:,0]=np.asarray(item_B[:,:,0])/50.0-1.0\n item_B[:,:,1:]=2.0*(np.asarray(item_B[:,:,1:])+128.0)/255.0-1.0\n item_B=torch.from_numpy(item_B.copy()).float()\n item_B_l=item_B[:,:,0]\n item_B=item_B.view(400,400,3)\n item_B_l=item_B_l.view(400,400,1)\n item_B_l=item_B_l.transpose(0, 1).transpose(0, 2).contiguous()\n item_B=item_B.transpose(0, 1).transpose(0, 2).contiguous()\n else:\n item_B = color.rgb2lab(io.imread(self.files_B[index % len(self.files_B)]))\n item_B=resize(item_B,(448,448,3))\n item_B=item_B[i:i+400,j:j+400,:]\n if k>50:\n item_B=np.fliplr(item_B)\n item_B[:,:,0]=np.asarray(item_B[:,:,0])/50.0-1.0\n item_B[:,:,1:]=2.0*(np.asarray(item_B[:,:,1:])+128.0)/255.0-1.0\n item_B=torch.from_numpy(item_B.copy()).float()\n item_B_l=item_B[:,:,0]\n item_B=item_B.view(400,400,3)\n item_B_l=item_B_l.view(400,400,1)\n item_B_l=item_B_l.transpose(0, 1).transpose(0, 2).contiguous()\n item_B=item_B.transpose(0, 1).transpose(0, 2).contiguous()\n\n return {'A': item_A, 'B': item_B,'AL':item_A_l,'BL':item_B_l}\n\n def __len__(self):\n return max(len(self.files_A), len(self.files_B))\n\n", "sub_path": "datasets_aistd.py", "file_name": "datasets_aistd.py", "file_ext": 
"py", "file_size_in_byte": 2956, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.utils.data.Dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "glob.glob", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 20, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 21, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 22, "usage_type": "call"}, {"api_name": "skimage.color.rgb2lab", "line_number": 24, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 24, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 24, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 24, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 31, "usage_type": "call"}, {"api_name": "skimage.color.rgb2lab", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 38, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 38, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 38, "usage_type": "call"}, {"api_name": "skimage.transform.resize", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 44, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 45, "usage_type": "call"}, {"api_name": "skimage.color.rgb2lab", "line_number": 52, "usage_type": "call"}, {"api_name": "skimage.color", "line_number": 52, "usage_type": "name"}, {"api_name": "skimage.io.imread", "line_number": 52, "usage_type": "call"}, {"api_name": "skimage.io", "line_number": 52, "usage_type": "name"}, {"api_name": "skimage.transform.resize", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.fliplr", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 58, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 59, "usage_type": "call"}]} +{"seq_id": "20289404", "text": "# Ejercicio5\n# imprima el mensaje: \"hola mundo super CRUEL!\"\n\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\n\nprint(\"hola mundo SUPER cruel\")\n\n#Carga archivo dat.txt\ndata=np.loadtxt('dat.txt',delimiter=' ')\n\n#Datos X y Y\nx=data[:,0]\ny=data[:,1]\n\nplt.plot(x,y,\"m\",label=\"Global data\")\nplt.legend()\nplt.xlabel(\"Años\")\nplt.ylabel(\"Anomalías de temperatura (°C)\")\nplt.savefig('Grafica_temp.png')\n\n", "sub_path": "test.py", "file_name": "test.py", "file_ext": 
"py", "file_size_in_byte": 476, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.use", "line_number": 6, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}]} +{"seq_id": "495187096", "text": "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass Model(nn.Module):\n\n def __init__(self, n_lang, n_inp, n_hid, n_out):\n super(Model, self).__init__()\n self.n_hid = n_hid\n self.i2o = nn.Linear(n_lang+n_inp+n_hid, n_out)\n self.i2h = nn.Linear(n_lang+n_inp+n_hid, n_hid)\n self.o2o = nn.Linear(n_out+n_hid, n_out)\n\n def forward(self, lang, input, hidden):\n in_comb = torch.cat((lang, input, hidden), 1)\n output = self.i2o(in_comb)\n hidden = self.i2h(in_comb)\n out_comb = torch.cat((output, hidden), 1)\n output = self.o2o(out_comb)\n output = F.dropout(output, p=0.1)\n output = F.log_softmax(output)\n return output, hidden\n\n def init_hidden(self):\n return Variable(torch.zeros(1, self.n_hid))\n", "sub_path": "char-rnn-generation/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 855, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.cat", "line_number": 17, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.nn.functional.dropout", "line_number": 22, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 22, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.autograd.Variable", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "612890119", "text": "import os\nimport patoolib\n\n\ndef is_archive(filepath):\n \"\"\"\n Checks if the file indicated by `filepath` is a valid arhive.\n\n An exception is raised if the file does not exist or is not readable.\n This is a 
best guess checking procedure and the supported formats\n    are those indicated by `patool`: https://github.com/wummel/patool\n    \"\"\"\n    assert(os.path.isfile(filepath))\n    assert(os.access(filepath, os.R_OK))\n\n    try:\n        fmt, compression = patoolib.get_archive_format(filepath)\n        patoolib.check_archive_format(fmt, compression)\n        program = patoolib.find_archive_program(fmt, 'test')\n        patoolib.check_program_compression(filepath, ' test', program, compression) # noqa: E501\n        return True\n    except patoolib.util.PatoolError:\n        return False\n\n\ndef extract_archive(filepath, outdir):\n    \"\"\"\n    Extracts the contents of the archive into the output directory.\n\n    The output directory is required and must be a valid file-system path.\n    The output directory and all parent directories are created if missing.\n    An exception is raised if the archive does not exist, is not readable\n    or is not a valid archive (best guess, not all formats are supported).\n    \"\"\"\n    assert(is_archive(filepath))\n    patoolib.extract_archive(\n        filepath,\n        verbosity=-1,\n        outdir=outdir,\n        interactive=False\n    )\n", "sub_path": "data_mine/utils/archive_utils.py", "file_name": "archive_utils.py", "file_ext": "py", "file_size_in_byte": 1396, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.isfile", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.access", "line_number": 14, "usage_type": "call"}, {"api_name": "os.R_OK", "line_number": 14, "usage_type": "attribute"}, {"api_name": "patoolib.get_archive_format", "line_number": 17, "usage_type": "call"}, {"api_name": "patoolib.check_archive_format", "line_number": 18, "usage_type": "call"}, {"api_name": "patoolib.find_archive_program", "line_number": 19, "usage_type": "call"}, {"api_name": "patoolib.check_program_compression", "line_number": 20, "usage_type": "call"}, {"api_name": "patoolib.util", "line_number": 22, "usage_type": "attribute"}, {"api_name": "patoolib.extract_archive", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "262696734", "text": "\"\"\"\nCopyright (c) 2019 10x Genomics, Inc. 
All rights reserved.\n\nMerges multiple JSON files with barcodes to be excluded from cell calling for\nvarious reasons.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport json\nfrom tenkit.safe_json import json_sanitize\n\n\n__MRO__ = \"\"\"\nstage MERGE_EXCLUDED_BARCODES(\n in json[] barcode_exclusions,\n out json excluded_barcodes,\n src py \"stages/processing/cell_calling/merge_excluded_barcodes\",\n)\n\"\"\"\n\n\ndef main(args, outs):\n exclusions = {}\n for filename in args.barcode_exclusions:\n if filename is None or not os.path.isfile(filename):\n continue\n with open(filename, \"r\") as infile:\n data = json.load(infile)\n reason = data[\"label\"]\n for species, barcode_data in data[\"data\"].iteritems():\n if species not in exclusions:\n exclusions[species] = {}\n for barcode, metric in barcode_data.iteritems():\n if barcode in exclusions[species]:\n # This barcode was already excluded by another file\n continue\n exclusions[species][barcode] = [reason, metric]\n\n with open(outs.excluded_barcodes, \"w\") as outfile:\n json.dump(json_sanitize(exclusions), outfile, indent=4, sort_keys=True)\n", "sub_path": "mro/atac/stages/processing/cell_calling/merge_excluded_barcodes/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.isfile", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 41, "usage_type": "call"}, {"api_name": "tenkit.safe_json.json_sanitize", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "184013077", "text": "import scrapy\r\nimport unicodedata\r\n\r\nfrom OpB.items import NumberItem\r\n\r\nfrom scrapy.contrib.spiders import CrawlSpider, Rule\r\nfrom scrapy.contrib.linkextractors import LinkExtractor\r\nfrom dateutil.parser import parse\r\n\r\nclass BPSpider(CrawlSpider):\r\n\tname = 'myprovguide'\r\n\tallowed_domains = [\r\n\t\t'myproviderguide.com'\r\n\t\t]\r\n\tstart_urls = [\r\n\t\t'http://myproviderguide.com/dayton/escorts', \r\n\t\t'http://myproviderguide.com/columbus/escorts', \r\n\t\t'http://myproviderguide.com/cleveland/escorts', \r\n\t\t'http://myproviderguide.com/toledo/escorts', \r\n\t\t'http://myproviderguide.com/louisville/escorts', \r\n\t\t'http://myproviderguide.com/detroit/escorts', \r\n\t\t'http://myproviderguide.com/cincinnati/escorts',\r\n\t\t'http://myproviderguide.com/indianapolis/escorts', \r\n\t\t'http://myproviderguide.com/pittsburgh/escorts'\r\n\t]\r\n\trules = [Rule(LinkExtractor(allow=['/escorts/[0-9]{7,}.*']), 'parse_ad')]\r\n\t\r\n\tdef parse_ad(self, response):\r\n\t\tnumber = NumberItem()\t#Instantiate Number item\r\n\t\t\r\n\t\t#Get phone number\r\n\t\tphone_raw = response.xpath(\"//h2[@class='h1_title fontwhite']/text()\").re('\\D?(\\d{3})\\D?\\D?(\\d{3})\\D?(\\d{4})')\r\n\t\tphone_raw_back = response.xpath(\"//h1[@id='h1_title_top']/text()\").re('\\D?(\\d{3})\\D?\\D?(\\d{3})\\D?(\\d{4})')\r\n\t\t\r\n\t\t#Only concat pieces of phone number if there is one (regex returns number in 3 parts)\r\n\t\tif(len(phone_raw) == 3):\r\n\t\t\tphone_a = str(phone_raw[0])\r\n\t\t\tphone_b = str(phone_raw[1])\r\n\t\t\tphone_c = str(phone_raw[2])\r\n\t\t\r\n\t\t\tphone_t = phone_a + phone_b + phone_c\r\n\t\telif(len(phone_raw_back) == 3):\r\n\t\t\tphone_a = 
str(phone_raw_back[0])\r\n\t\t\tphone_b = str(phone_raw_back[1])\r\n\t\t\tphone_c = str(phone_raw_back[2])\r\n\t\t\r\n\t\t\tphone_t = phone_a + phone_b + phone_c\r\n\t\telse:\r\n\t\t\tphone_t = \"NO NUMBER\"\r\n\t\t\t\r\n\t\t#Get timestamp\r\n\t\tdatetime = response.xpath(\"//div[@id='post_bodytext']/small/text()\").extract()[0]\r\n\t\tdatetime = datetime.encode('ascii', errors='ignore') #Strip out unicode characters for parsing\r\n\t\t\r\n\t\tdatetime_p = parse(datetime).strftime(\"%m/%d/%Y %H:%M\")\r\n\t\tdate = parse(datetime).strftime(\"%m/%d/%Y\"); #Parse the time\r\n\t\ttime = parse(datetime).strftime(\"%H:%M\");\r\n\t\t\r\n\t\t\r\n\t\t#Get Title\r\n\t\ttitle = response.xpath(\"//h2[@class='h1_title fontwhite']/text()\").extract()[0].replace('\\r\\n', '')\r\n\t\t\r\n\t\t#Get City\r\n\t\tcity = response.xpath(\"//div[@id='container_breadcrumb']/nav/small/span[1]/a/text()\").extract()[0]\r\n\t\tcity = city.encode('ascii', errors='ignore') #Normalize string\r\n\t\tcity = city.split(\",\")[0] #Strip state if present\r\n\t\t\r\n\t\tnumber['URL'] = response.url\t#Set URL\r\n\t\tnumber['Number'] = phone_t\r\n\t\tnumber['DateTime'] = datetime_p\r\n\t\tnumber['Date'] = date\r\n\t\tnumber['Time'] = time\r\n\t\tnumber['Title'] = title\r\n\t\tnumber['City'] = city\r\n\t\tnumber['Source'] = \"MyProviderGuide\"\r\n\t\treturn number", "sub_path": "OpB/spiders/MPGSpider.py", "file_name": "MPGSpider.py", "file_ext": "py", "file_size_in_byte": 2717, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.contrib.spiders.CrawlSpider", "line_number": 10, "usage_type": "name"}, {"api_name": "scrapy.contrib.spiders.Rule", "line_number": 26, "usage_type": "call"}, {"api_name": "scrapy.contrib.linkextractors.LinkExtractor", "line_number": 26, "usage_type": "call"}, {"api_name": "OpB.items.NumberItem", "line_number": 29, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 55, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 56, "usage_type": "call"}, {"api_name": "dateutil.parser.parse", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "418584327", "text": "\"\"\"\nDefinition of urls for ltfood.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.urls import path\nfrom django.contrib import admin\nfrom django.contrib.auth.views import LoginView, LogoutView\nfrom app import forms, views\n\n# https://stackoverflow.com/a/51635038\nfrom django.views.static import serve\nfrom django.conf import settings\nfrom django.conf.urls import url\n\nurlpatterns = [\n path('', views.home, name='home'),\n path('contact/', views.contact, name='contact'),\n path('about/', views.about, name='about'),\n path('login/',\n LoginView.as_view\n (\n template_name='app/login.html',\n authentication_form=forms.BootstrapAuthenticationForm,\n extra_context=\n {\n 'title': 'Log in',\n 'year' : datetime.now().year,\n }\n ),\n name='login'),\n path('logout/', LogoutView.as_view(next_page='/'), name='logout'),\n path('admin/', admin.site.urls),\n url(r'^app/static/(?P.*)$', serve,{'document_root': settings.STATIC_ROOT}), \n]\n", "sub_path": "ltfood/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1065, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "app.views.home", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.views", 
"line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "app.views.contact", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "app.views.about", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView.as_view", "line_number": 21, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LoginView", "line_number": 21, "usage_type": "name"}, {"api_name": "app.forms.BootstrapAuthenticationForm", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.forms", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView.as_view", "line_number": 32, "usage_type": "call"}, {"api_name": "django.contrib.auth.views.LogoutView", "line_number": 32, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 33, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 34, "usage_type": "call"}, {"api_name": "django.views.static.serve", "line_number": 34, "usage_type": "argument"}, {"api_name": "django.conf.settings.STATIC_ROOT", "line_number": 34, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "66933340", "text": "#使用DOG于SIFT进行特征的提取于描述\n#SIFT分别采用DOG和SIFT来检测关键点并提取关键点周围的特征\n#Difference of Gaussians (DOG) 通过对同一图像使用不同的高斯滤波器来获得感兴趣的区域(关键点)\n#SIFT算法会使用GOG检测关键点,并且对关键点周围的区域计算特征向量\n#SIFT算法是一种与图像比例无关的斑点检测算法\nimport cv2\nimport numpy as np\nimg = cv2.imread('E:\\\\Git\\\\opencv\\\\six\\\\resource\\\\picture_1.jpeg')\ncv2.imshow('img', img)\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\nsift = cv2.xfeatures2d.SIFT_create()\n#keypoint对象(angle, class_id, octave, pt(像素坐标),reponse, size)\n#keypoints 关键点keypoint对象List\n#descriptor 特征描述,二维矩阵\nkeypoints, descriptor = sift.detectAndCompute(gray, None)\npaper = np.ones(img.shape) * 255\npaper = cv2.drawKeypoints(image=img, outImage=paper, keypoints=keypoints, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS, color = (255, 0, 0))\ncv2.imshow('paper', paper)\ncv2.waitKey()\ncv2.destroyAllWindows()", "sub_path": "six/code/SIFT.py", "file_name": "SIFT.py", "file_ext": "py", "file_size_in_byte": 1068, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "cv2.imread", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 10, "usage_type": "attribute"}, {"api_name": "cv2.xfeatures2d.SIFT_create", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.xfeatures2d", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 16, "usage_type": "call"}, {"api_name": 
"cv2.drawKeypoints", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS", "line_number": 17, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 18, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 19, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "218740129", "text": "#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 19 20:17:38 2017\n\n@author: kaiyang\n\"\"\"\n\nfrom mininet.net import Mininet\nfrom mininet.node import Controller, RemoteController\nfrom mininet.cli import CLI\nfrom mininet.log import setLogLevel, info\nfrom mininet.link import Link, Intf, TCLink\nfrom mininet.topo import Topo\nfrom mininet.util import dumpNodeConnections\nimport logging\nimport os\nimport random\n\nlogging.basicConfig(filename='./fattree.log', level=logging.INFO)\nlogger = logging.getLogger(__name__)\nrandom.seed(10)\n\nclass MyFattree(Topo):\n logger.debug(\"Class Fattree\")\n CoreSwitchList = []\n AggSwitchList = []\n EdgeSwitchList = []\n HostList = []\n\n def __init__(self, k, density):\n logger.debug(\"Class Fattree init\")\n self.pod = k\n self.iCoreLayerSwitch = (k/2)**2\n self.iAggLayerSwitch = k*k/2\n self.iEdgeLayerSwitch = k*k/2\n self.density = density\n self.iHost = self.iEdgeLayerSwitch * density\n\n #Init Topo\n Topo.__init__(self)\n\n def createTopo(self):\n self.createCoreLayerSwitch(self.iCoreLayerSwitch)\n self.createAggLayerSwitch(self.iAggLayerSwitch)\n self.createEdgeLayerSwitch(self.iEdgeLayerSwitch)\n self.createHost(self.iHost)\n\n \"\"\"\n Create Switch and Host\n \"\"\"\n\n def _addSwitch(self, number, level, switch_list):\n for x in xrange(1, number+1):\n PREFIX = str(level) + \"00\"\n if x >= int(10):\n PREFIX = str(level) + \"0\"\n switch_list.append(self.addSwitch('s' + PREFIX + str(x)))\n\n def createCoreLayerSwitch(self, NUMBER):\n logger.debug(\"Create Core Layer\")\n self._addSwitch(NUMBER, 1, self.CoreSwitchList)\n\n def createAggLayerSwitch(self, NUMBER):\n logger.debug(\"Create Agg Layer\")\n self._addSwitch(NUMBER, 2, self.AggSwitchList)\n\n def createEdgeLayerSwitch(self, NUMBER):\n logger.debug(\"Create Edge Layer\")\n self._addSwitch(NUMBER, 3, self.EdgeSwitchList)\n\n def createHost(self, NUMBER):\n logger.debug(\"Create Host\")\n for x in xrange(1, NUMBER+1):\n PREFIX = \"h00\"\n if x >= int(10):\n PREFIX = \"h0\"\n elif x >= int(100):\n PREFIX = \"h\"\n self.HostList.append(self.addHost(PREFIX + str(x)))\n\n \"\"\"\n Add Link\n \"\"\"\n def createLink(self, bw_c2a=0.2, bw_a2e=0.1, bw_h2a=0.5):\n #random.seed(22)\n #-40+random.random()*40\n logger.debug(\"Add link Core to Agg.\")\n end = self.pod/2\n for x in xrange(0, self.iAggLayerSwitch, end):\n for i in xrange(0, end):\n for j in xrange(0, end):\n self.addLink(\n self.CoreSwitchList[i*end+j],\n self.AggSwitchList[x+i],\n bw=bw_c2a*(1+random.random()), delay='1ms', loss=0, max_queue_size=1000, use_htb=True)\n\n logger.debug(\"Add link Agg to Edge.\")\n for x in xrange(0, self.iAggLayerSwitch, end):\n for i in xrange(0, end):\n for j in xrange(0, end):\n self.addLink(\n self.AggSwitchList[x+i], self.EdgeSwitchList[x+j],\n bw=bw_a2e*(1+random.random()), delay='1ms', loss=0, max_queue_size=1000, use_htb=True)\n\n logger.debug(\"Add link Edge to Host.\")\n for x in xrange(0, self.iEdgeLayerSwitch):\n for i in xrange(0, self.density):\n self.addLink(\n self.EdgeSwitchList[x],\n self.HostList[self.density * x + i],\n 
bw=bw_h2a*(1+random.random()), delay='1ms', loss=0, max_queue_size=1000, use_htb=True)\n", "sub_path": "Databot/myfattree.py", "file_name": "myfattree.py", "file_ext": "py", "file_size_in_byte": 3725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.basicConfig", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 20, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 21, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 22, "usage_type": "call"}, {"api_name": "mininet.topo.Topo", "line_number": 24, "usage_type": "name"}, {"api_name": "mininet.topo.Topo.__init__", "line_number": 41, "usage_type": "call"}, {"api_name": "mininet.topo.Topo", "line_number": 41, "usage_type": "name"}, {"api_name": "random.random", "line_number": 96, "usage_type": "call"}, {"api_name": "random.random", "line_number": 104, "usage_type": "call"}, {"api_name": "random.random", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "16444548", "text": "#!/usr/bin/python\n\nimport rospy\nfrom geometry_msgs.msg import Pose, Vector3\nimport numpy as np\nimport time\nimport tf\n\nx_data, y_data = [], []\nvl, vr = 0, 0\ni_l, i_r = 0, 0\nkp_r, kp_theta = 12, 0.5\n\ndef calibration_callback(msg):\n\tx_data.append(msg.position.x)\n\ty_data.append(msg.position.z)\n\ndef threshold(val, lb, ub):\n if val < lb:\n return lb\n if val > ub:\n return ub\n return val\n\ndef control_callback(msg):\n\tglobal vl, vr, i_l, i_r\n\tdiff_r = np.sqrt(msg.orientation.x**2 + msg.orientation.y ** 2)\n\t\n\tquaternion = (msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w)\n\teuler = tf.transformations.euler_from_quaternion(quaternion)\n\tactual_yaw = euler[2] \n\tdesired_yaw = np.arctan2(-msg.position.y, -msg.position.x)\n\t\n\tdiff_theta = desired_yaw - actual_yaw\n\t#print('actual_yaw:', actual_yaw / np.pi * 180)\n\t#print('desired_yaw:', desired_yaw / np.pi * 180)\n\t\n\tprint('diff_theta:', diff_theta / np.pi*180)\n\tprint('diff_r:', diff_r)\n\tvl = kp_r * diff_r - kp_theta * diff_theta\n\tvr = kp_r * diff_r + kp_theta * diff_theta\n\tvl = threshold(vl, 0, 1)\n\tvr = threshold(vr, 0, 1)\n\tprint(vl, vr)\n'''\nCalibrates a point by repeatedly reading from OptiTrack and taking average\n'''\ndef calibrate_point():\n\tglobal x_data, y_data\n\tx_data, y_data = [], []\n\tsubscribe_calibration = rospy.Subscriber(\"/Robot_1/pose\", Pose, calibration_callback)\n\twhile len(y_data) <= 10:\n\t\tpass\n\tsubscribe_calibration.unregister()\n\tans_x, ans_y = np.mean(x_data), np.mean(y_data)\n\treturn ans_x, ans_y\n\nrospy.init_node(\"treadmill_optitrack\", anonymous=True)\n'''\n# Find center of OpenRoACH\nraw_input(\"Put OpenRoACH on treadmill center, then press any key to start calibration\")\n\ncenter_x, center_y = calibrate_point();\nprint(\"Center found at {:.3f}, {:.3f}\".format(center_x, center_y))\n\n# Find Track Orientation\nprint(\"Now let's find the orientation of treadmill by finding 2 points on the treadmill\")\nprint(\"First, put OpenRoACH at an 'earlier' position on the treadmill\")\nraw_input(\"Press Any Key to Continue...\")\nstart_x, start_y = calibrate_point();\nprint(\"Starting point found at {:.3f}, {:.3f}\".format(start_x, start_y))\n\nprint(\"Now, place OpenRoACH at a 'later' position in the treadmill. That is, assume OpenRoACH is static w.r.t. 
treadmill while treadmill is moving, where would it be now?\")\nraw_input(\"Press Any Key to Continue...\")\nend_x, end_y = calibrate_point();\nprint(\"Ending point found at {:.3f}, {:.3f}\".format(end_x, end_y))\n\n'''\nprint(\"Now let's see how the control goes!\")\nrospy.Subscriber(\"/Robot_1/pose\", Pose, control_callback)\n\ncontrol_pub = rospy.Publisher('/cmd_pwm', Vector3, queue_size = 1)\n\nx = 'a'\nwhile not rospy.is_shutdown():\n\tcontrol_pub.publish(Vector3(vl, vr, 0))\n\ttime.sleep(0.1);\n\n\n\n", "sub_path": "src/treadmill_optitrack.py", "file_name": "treadmill_optitrack.py", "file_ext": "py", "file_size_in_byte": 2759, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.sqrt", "line_number": 27, "usage_type": "call"}, {"api_name": "tf.transformations.euler_from_quaternion", "line_number": 30, "usage_type": "call"}, {"api_name": "tf.transformations", "line_number": 30, "usage_type": "attribute"}, {"api_name": "numpy.arctan2", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rospy.Subscriber", "line_number": 51, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Pose", "line_number": 51, "usage_type": "argument"}, {"api_name": "numpy.mean", "line_number": 55, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 58, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 80, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Pose", "line_number": 80, "usage_type": "argument"}, {"api_name": "rospy.Publisher", "line_number": 82, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 82, "usage_type": "argument"}, {"api_name": "rospy.is_shutdown", "line_number": 85, "usage_type": "call"}, {"api_name": "geometry_msgs.msg.Vector3", "line_number": 86, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "598789776", "text": "#!/usr/bin/env python\n\nimport os\nimport argparse\nimport warnings\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport astropy.io.fits as pf\nfrom astropy import log\nfrom astropy.logger import AstropyUserWarning\n\nfrom stingray.io import load_events_and_gtis, ref_mjd\nfrom stingray.pulse.pulsar import pulse_phase, phase_exposure\nfrom .io import is_string, save_as_qdp\nfrom .base import _assign_value_if_none, hen_root\nfrom .fold import fit_profile, std_fold_fit_func\n\n\ndef outfile_name(file):\n \"\"\"Output file name for phasetag.\n\n Examples\n --------\n >>> outfile_name('file.s.a.fits.Z')\n 'file.s.a_phasetag.fits.Z'\n >>> outfile_name('file.s.a.evct.gz')\n 'file.s.a_phasetag.evct.gz'\n >>> outfile_name('file.s.a.evct')\n 'file.s.a_phasetag.evct'\n \"\"\"\n root, ext = os.path.splitext(file)\n if ext.lower() in [\".gz\", \".z\"]:\n root, newext = os.path.splitext(root)\n ext = newext + ext\n\n return root + \"_phasetag\" + ext\n\n\ndef phase_tag(\n ev_list,\n parameter_info,\n gti=None,\n mjdref=0,\n nbin=10,\n ref_to_max=False,\n pepoch=None,\n expocorr=True,\n pulse_ref_time=None,\n plot=True,\n test=False,\n):\n \"\"\"Phase-tag events in a FITS file with a given ephemeris.\n\n Parameters\n ----------\n ev_list : float\n Event times\n parameter_info : str or array of floats\n If a string, this is a pulsar parameter file that PINT is able to\n understand. 
Otherwise, this is a list of frequency derivatives\n [F0, F1, F2, ...]\n\n Other parameters\n ----------------\n gti : [[g0_0, g0_1], [g1_0, g1_1], ...]\n Good time intervals\n nbin : int\n Number of nbin in the pulsed profile\n ref_to_max : bool\n Automatically refer the TOAs to the maximum of the profile\n pepoch : float, default None\n Reference epoch for the timing solution. If None, this is the start\n of the observation.\n pulse_ref_time : float\n Reference time for the pulse. This overrides ref_to_max\n plot : bool\n Plot diagnostics\n expocorr : bool\n Use exposure correction when calculating the profile\n\n \"\"\"\n # ---- in MJD ----\n if gti is None:\n gti = np.array([[ev_list[0], ev_list[-1]]])\n\n ev_mjd = ev_list / 86400 + mjdref\n gti_mjd = gti / 86400 + mjdref\n\n pepoch = _assign_value_if_none(pepoch, gti_mjd[0, 0])\n\n # ------ Orbital DEMODULATION --------------------\n if is_string(parameter_info):\n raise NotImplementedError(\n \"This part is not yet implemented. Please \"\n \"use single frequencies and pepoch as \"\n \"documented\"\n )\n\n else:\n frequency_derivatives = parameter_info\n times = (ev_mjd - pepoch) * 86400\n\n f = frequency_derivatives[0]\n\n phase = pulse_phase(times, *frequency_derivatives, to_1=False)\n gti_phases = pulse_phase(\n (gti_mjd - pepoch) * 86400, *frequency_derivatives, to_1=False\n )\n\n # ------- now apply period derivatives ------\n\n log.info(\"Calculating phases...\")\n ref_phase = 0\n ref_time = 0\n\n if pulse_ref_time is not None:\n ref_time = (pulse_ref_time - pepoch) * 86400\n ref_phase = ref_time * f\n elif ref_to_max:\n phase_to1 = phase - np.floor(phase)\n\n raw_profile, bins = np.histogram(phase_to1, bins=np.linspace(0, 1, nbin + 1))\n exposure = phase_exposure(\n gti_phases[0, 0], gti_phases[-1, 1], 1, nbin=nbin, gti=gti_phases\n )\n profile = raw_profile / exposure\n profile_err = np.sqrt(raw_profile) / exposure\n\n sinpars, bu, bu = fit_profile(\n profile, profile_err, nperiods=2, baseline=True, debug=test\n )\n fine_phases = np.linspace(0, 2, 1000 * 2)\n fitted_profile = std_fold_fit_func(sinpars, fine_phases)\n maxp = np.argmax(fitted_profile)\n ref_phase = fine_phases[maxp]\n if test: # pragma: no cover\n # No tests with a pulsed profile yet\n ref_phase = bins[np.argmax(raw_profile)]\n\n ref_time = ref_phase / f\n\n phase -= ref_phase\n gti_phases -= ref_phase\n phase_to1 = phase - np.floor(phase)\n\n raw_profile, bins = np.histogram(phase_to1, bins=np.linspace(0, 1, nbin + 1))\n\n exposure = phase_exposure(\n gti_phases[0, 0], gti_phases[-1, 1], 1, nbin=nbin, gti=gti_phases\n )\n if np.any(np.logical_or(exposure != exposure, exposure == 0)):\n warnings.warn(\n \"Exposure has NaNs or zeros. 
Profile is not normalized\",\n AstropyUserWarning,\n )\n expocorr = False\n\n if not expocorr:\n exposure = np.ones_like(raw_profile)\n\n profile = raw_profile / exposure\n profile = np.append(profile, profile)\n exposure = np.append(exposure, exposure)\n profile_err = np.sqrt(profile)\n phs = (bins[1:] + bins[:-1]) / 2\n phs = np.append(phs, phs + 1)\n\n fig = None\n if plot:\n fig = plt.figure()\n plt.errorbar(phs, profile / exposure, yerr=profile_err / exposure, fmt=\"none\")\n plt.plot(phs, profile / exposure, \"k-\", drawstyle=\"steps-mid\")\n plt.xlabel(\"Phase\")\n plt.ylabel(\"Counts\")\n for i in range(20):\n plt.axvline(i * 0.1, ls=\"--\", color=\"b\")\n if not test: # pragma: no cover\n plt.show()\n else:\n plt.close(fig)\n\n # ------ WRITE RESULTS BACK TO FITS --------------\n results = type(\"results\", (object,), {})\n results.ev_list = ev_list\n results.phase = phase\n results.frequency_derivatives = frequency_derivatives\n results.ref_time = ref_time\n results.figure = fig\n results.plot_phase = phs\n results.plot_profile = profile / exposure\n results.plot_profile_err = profile_err / exposure\n return results\n\n\ndef phase_tag_fits(\n filename,\n parameter_info,\n gtistring=\"GTI,STDGTI\",\n gti_file=None,\n hduname=\"EVENTS\",\n column=\"TIME\",\n **kwargs\n):\n \"\"\"Phase-tag events in a FITS file with a given ephemeris.\n\n Parameters\n ----------\n filename : str\n Events FITS file\n parameter_info : str or array of floats\n If a string, this is a pulsar parameter file that PINT is able to\n understand. Otherwise, this is a list of frequency derivatives\n [F0, F1, F2, ...]\n\n Other parameters\n ----------------\n nbin : int\n Number of nbin in the pulsed profile\n ref_to_max : bool\n Automatically refer the TOAs to the maximum of the profile\n pepoch : float, default None\n Reference epoch for the timing solution. If None, this is the start\n of the observation.\n pulse_ref_time : float\n Reference time for the pulse. This overrides ref_to_max\n plot : bool\n Plot diagnostics\n expocorr : bool\n Use exposure correction when calculating the profile\n gtistring : str\n Comma-separated list of accepted GTI extensions (default GTI,STDGTI),\n with or without appended integer number denoting the detector\n gti_file : str, default None\n External GTI file\n hduname : str, default 'EVENTS'\n Name of the HDU containing the event list\n \"\"\"\n\n outfile = outfile_name(filename)\n evreturns = load_events_and_gtis(\n filename,\n gtistring=gtistring,\n gti_file=gti_file,\n hduname=hduname,\n column=column,\n )\n mjdref = ref_mjd(filename)\n\n results = phase_tag(\n evreturns.ev_list,\n parameter_info,\n gti=evreturns.gti_list,\n mjdref=mjdref,\n **kwargs\n )\n if results.figure is not None:\n results.figure.savefig(hen_root(filename) + \".pdf\")\n phase = results.phase\n frequency_derivatives = results.frequency_derivatives\n ref_time = results.ref_time\n\n phase_to1 = phase - np.floor(phase)\n\n # Save results to fits file\n hdulist = pf.open(filename, checksum=True)\n tbhdu = hdulist[hduname]\n table = tbhdu.data\n order = np.argsort(table[column])\n # load_events_and_gtis sorts the data automatically. This is not the case\n # of the other operations done here. So, let's sort the table first.\n for col in table.names:\n table.field(col)[:] = np.array(table[col])[order]\n\n # If columns already exist, overwrite them. 
Else, create them\n create = False\n if \"Orbit_bary\" in table.names:\n table.field(\"Orbit_bary\")[:] = evreturns.ev_list\n table.field(\"TotPhase_s\")[:] = phase / frequency_derivatives[0] + ref_time\n table.field(\"Phase\")[:] = phase_to1\n else:\n create = True\n\n # first of all, copy columns\n cols = table.columns\n\n # create new list of columns, copying all columns from other table\n newlist = []\n\n for c in cols:\n newlist.append(c)\n\n if create:\n # then, create new column with orbital demodulation\n newcol = pf.Column(\n name=\"Orbit_bary\", format=\"1D\", unit=\"s\", array=results.ev_list\n )\n\n # append it to new table\n newlist.append(newcol)\n\n # Do the same with total phase\n newcol = pf.Column(\n name=\"TotPhase_s\",\n format=\"1D\",\n unit=\"s\",\n array=phase / frequency_derivatives[0] + ref_time,\n )\n\n newlist.append(newcol)\n\n # Do the same with fractional phase\n newcol = pf.Column(name=\"Phase\", format=\"1D\", unit=\"phase\", array=phase_to1)\n\n newlist.append(newcol)\n\n # make new column definitions\n coldefs = pf.ColDefs(newlist)\n\n # create new record\n newrec = pf.FITS_rec.from_columns(coldefs)\n\n # and new hdu\n newtbhdu = pf.BinTableHDU(\n data=newrec, header=tbhdu.header.copy(), name=hduname, uint=False\n )\n\n # Copy primary HDU from old file\n prihdu = hdulist[0].copy()\n\n # define new hdulist\n newhdulist = pf.HDUList([prihdu])\n\n # Copy remaining HDUs from old file\n for h in hdulist[1:]:\n if h.name == hduname:\n newhdulist.append(newtbhdu)\n else:\n newhdulist.append(h.copy())\n\n try:\n newhdulist.writeto(outfile, overwrite=True, checksum=True)\n except Exception:\n newhdulist.writeto(outfile, overwrite=True)\n hdulist.close()\n\n save_as_qdp(\n [results.plot_phase, results.plot_profile, results.plot_profile_err],\n filename=outfile.replace(\".evt\", \"\") + \".qdp\",\n )\n\n\ndef main_phasetag(args=None):\n from .base import check_negative_numbers_in_args\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"file\", help=\"Event file\", type=str)\n parser.add_argument(\"--parfile\", help=\"Parameter file\", type=str, default=None)\n parser.add_argument(\n \"-f\",\n \"--freqs\",\n help=\"Frequency derivatives\",\n type=float,\n default=None,\n nargs=\"+\",\n )\n parser.add_argument(\"-n\", \"--nbin\", type=int, default=16, help=\"Nbin\")\n parser.add_argument(\n \"--plot\",\n action=\"store_true\",\n default=False,\n dest=\"plot\",\n help=\"Plot profile\",\n )\n parser.add_argument(\n \"--tomax\",\n action=\"store_true\",\n default=False,\n dest=\"tomax\",\n help=\"Refer phase to pulse max\",\n )\n parser.add_argument(\n \"--test\",\n action=\"store_true\",\n default=False,\n dest=\"test\",\n help=\"Only for unit tests! 
Do not use\",\n )\n parser.add_argument(\n \"--refTOA\",\n default=None,\n type=np.longdouble,\n help=\"Reference TOA in MJD (overrides --tomax) for \" \"reference pulse phase\",\n dest=\"pulse_ref_time\",\n )\n parser.add_argument(\n \"--pepoch\",\n default=None,\n type=np.longdouble,\n help=\"Reference time for timing solution\",\n dest=\"pepoch\",\n )\n\n args = check_negative_numbers_in_args(args)\n args = parser.parse_args(args)\n\n if args.freqs is None and args.parfile is None:\n raise ValueError(\"Specify one between --freqs or --parfile\")\n elif args.freqs is not None and args.parfile is not None:\n raise ValueError(\"Specify only one between --freqs or --parfile\")\n elif args.freqs is not None:\n parameter_info = args.freqs\n else:\n parameter_info = args.parfile\n\n plot = args.plot\n expocorr = True\n\n phase_tag_fits(\n args.file,\n parameter_info,\n plot=plot,\n nbin=args.nbin,\n test=args.test,\n expocorr=expocorr,\n ref_to_max=args.tomax,\n pulse_ref_time=args.pulse_ref_time,\n pepoch=args.pepoch,\n )\n", "sub_path": "hendrics/phasetag.py", "file_name": "phasetag.py", "file_ext": "py", "file_size_in_byte": 12453, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.splitext", "line_number": 31, "usage_type": "call"}, {"api_name": "os.path", "line_number": 31, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 84, "usage_type": "call"}, {"api_name": "base._assign_value_if_none", "line_number": 89, "usage_type": "call"}, {"api_name": "io.is_string", "line_number": 92, "usage_type": "call"}, {"api_name": "stingray.pulse.pulsar.pulse_phase", "line_number": 105, "usage_type": "call"}, {"api_name": "stingray.pulse.pulsar.pulse_phase", "line_number": 106, "usage_type": "call"}, {"api_name": "astropy.log.info", "line_number": 112, "usage_type": "call"}, {"api_name": "astropy.log", "line_number": 112, "usage_type": "name"}, {"api_name": "numpy.floor", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 122, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 122, "usage_type": "call"}, {"api_name": "stingray.pulse.pulsar.phase_exposure", "line_number": 123, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 127, "usage_type": "call"}, {"api_name": "fold.fit_profile", "line_number": 129, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 132, "usage_type": "call"}, {"api_name": "fold.std_fold_fit_func", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.histogram", "line_number": 146, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 146, "usage_type": "call"}, {"api_name": "stingray.pulse.pulsar.phase_exposure", "line_number": 148, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.logical_or", "line_number": 151, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 152, "usage_type": "call"}, {"api_name": "astropy.logger.AstropyUserWarning", "line_number": 154, "usage_type": "argument"}, {"api_name": "numpy.ones_like", "line_number": 159, 
"usage_type": "call"}, {"api_name": "numpy.append", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 166, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 170, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 170, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.errorbar", "line_number": 171, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 171, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 172, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axvline", "line_number": 176, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 176, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 178, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 178, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "stingray.io.load_events_and_gtis", "line_number": 240, "usage_type": "call"}, {"api_name": "stingray.io.ref_mjd", "line_number": 247, "usage_type": "call"}, {"api_name": "base.hen_root", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 262, "usage_type": "call"}, {"api_name": "astropy.io.fits.open", "line_number": 265, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 265, "usage_type": "name"}, {"api_name": "numpy.argsort", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 272, "usage_type": "call"}, {"api_name": "astropy.io.fits.Column", "line_number": 294, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 294, "usage_type": "name"}, {"api_name": "astropy.io.fits.Column", "line_number": 302, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 302, "usage_type": "name"}, {"api_name": "astropy.io.fits.Column", "line_number": 312, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 312, "usage_type": "name"}, {"api_name": "astropy.io.fits.ColDefs", "line_number": 317, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 317, "usage_type": "name"}, {"api_name": "astropy.io.fits.FITS_rec.from_columns", "line_number": 320, "usage_type": "call"}, {"api_name": "astropy.io.fits.FITS_rec", "line_number": 320, "usage_type": "attribute"}, {"api_name": "astropy.io.fits", "line_number": 320, "usage_type": "name"}, {"api_name": "astropy.io.fits.BinTableHDU", "line_number": 323, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 323, "usage_type": "name"}, {"api_name": "astropy.io.fits.HDUList", "line_number": 331, "usage_type": "call"}, {"api_name": "astropy.io.fits", "line_number": 331, "usage_type": "name"}, {"api_name": "io.save_as_qdp", "line_number": 346, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 355, 
"usage_type": "call"}, {"api_name": "numpy.longdouble", "line_number": 392, "usage_type": "attribute"}, {"api_name": "numpy.longdouble", "line_number": 399, "usage_type": "attribute"}, {"api_name": "base.check_negative_numbers_in_args", "line_number": 404, "usage_type": "call"}]} +{"seq_id": "4485999", "text": "import cv2\nimport pygame\nimport serial\nimport numpy as np\nimport os\nimport time\n\n\nclass CollectingData:\n\n def __init__(self, url, serial_port, input_size):\n self.STREAM = url\n self.ser = serial.Serial(serial_port, 115200, timeout=1)\n pygame.init()\n pygame.display.set_mode((300, 300))\n self.input_size = input_size #240, 320\n\n self.right = np.array([0,0,1])\n self.left = np.array([1,0,0])\n self.forward = np.array([0,1,0])\n self.stop = np.array([0,0,0])\n\n def start_collection(self):\n cap = cv2.VideoCapture(self.STREAM)\n total_frame = 0\n saved_frame = 0\n X = np.empty((0, self.input_size[0]*self.input_size[1]))\n y = np.empty((0, 3))\n while True:\n # Capture frame\n ret, frame = cap.read()\n # resize\n to_x = cv2.resize(frame, self.input_size, interpolation=cv2.INTER_CUBIC)\n # Display the resulting frame\n cv2.imshow('frame', frame)\n total_frame += 1\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n break\n\n if event.type == pygame.KEYDOWN:\n key_input = pygame.key.get_pressed()\n\n # simple orders\n if key_input[pygame.K_UP]:\n print(\"Forward\")\n saved_frame += 1\n X = np.vstack((X, to_x))\n y = np.vstack((y, self.forward))\n self.ser.write(chr(1).encode())\n\n elif key_input[pygame.K_DOWN]:\n print(\"Reverse\")\n self.ser.write(chr(2).encode())\n\n elif key_input[pygame.K_RIGHT]:\n print(\"Right\")\n X = np.vstack((X, to_x))\n y = np.vstack((y, self.right))\n saved_frame += 1\n self.ser.write(chr(3).encode())\n\n elif key_input[pygame.K_LEFT]:\n print(\"Left\")\n X = np.vstack((X, to_x))\n y = np.vstack((y, self.left))\n saved_frame += 1\n self.ser.write(chr(4).encode())\n\n elif key_input[pygame.K_x] or key_input[pygame.K_q]:\n print(\"exit\")\n self.ser.write(chr(0).encode())\n self.ser.close()\n break\n\n elif event.type == pygame.KEYUP:\n self.ser.write(chr(0).encode())\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n file = str(int(time.time()))+\"collected_data\"\n path = \"training_data\"\n if not os.path.exists(path):\n os.makedirs(path)\n try:\n np.savez(path + '/' + file + '.npz', train=X, train_labels=y)\n except IOError as e:\n print(e)\n\n print(\"Saved Images: \"+str(saved_frame))\n # When everything done, release the capture\n cap.release()\n cv2.destroyAllWindows()\n pygame.quit()\n\n\nif __name__ == \"__main__\":\n serial_port = \"com9\"\n input_size = (240, 320)\n url = \"http://192.168.1.103:8080/video\"\n cd = CollectingData(url=url, serial_port=serial_port, input_size=input_size)\n cd.start_collection()\n", "sub_path": "Model2_UsingRpiCamera/For_Laptop/collecting_training_data.py", "file_name": "collecting_training_data.py", "file_ext": "py", "file_size_in_byte": 3439, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "serial.Serial", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 15, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 19, "usage_type": 
"call"}, {"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 33, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 33, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 35, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 39, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 42, "usage_type": "attribute"}, {"api_name": "pygame.key.get_pressed", "line_number": 43, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 43, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 46, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 50, "usage_type": "call"}, {"api_name": "pygame.K_DOWN", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.K_LEFT", "line_number": 64, "usage_type": "attribute"}, {"api_name": "numpy.vstack", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.K_x", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.KEYUP", "line_number": 77, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 80, "usage_type": "call"}, {"api_name": "time.time", "line_number": 83, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.savez", "line_number": 88, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 95, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 96, "usage_type": "call"}]} +{"seq_id": "268563570", "text": "#!/usr/bin/python3 \nimport http.server, ssl\n\nserver_address = ('0.0.0.0', 4443)\nhttpd = http.server.HTTPServer(server_address, http.server.SimpleHTTPRequestHandler)\nhttpd.socket = ssl.wrap_socket(httpd.socket,\n server_side=True,\n certfile='./https-localhost/localhost.pem',\n ssl_version=ssl.PROTOCOL_TLS)\nhttpd.serve_forever()\n", "sub_path": "web-client/simple-https-server.py", "file_name": "simple-https-server.py", "file_ext": "py", "file_size_in_byte": 417, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "http.server.server.HTTPServer", "line_number": 5, "usage_type": "call"}, {"api_name": "http.server.server", "line_number": 5, "usage_type": "attribute"}, {"api_name": "http.server", "line_number": 5, "usage_type": "name"}, {"api_name": "ssl.wrap_socket", "line_number": 6, "usage_type": "call"}, {"api_name": "ssl.PROTOCOL_TLS", "line_number": 9, 
"usage_type": "attribute"}]} +{"seq_id": "317766236", "text": "import torch\nimport time\nimport os\nfrom data import load_enron\nfrom models import StyleTransformer, Discriminator\nfrom train import train, auto_eval\n\ndef most_recent_path(rootpath, return_two=False):\n if rootpath is None:\n return None\n if not s.path.exists(rootpath):\n return None\n l = os.listdir(rootpath)\n if len(l) == 0:\n return None\n \n l.sort()\n ret = (os.path.join(rootpath, l[-1]), os.path.join(rootpath, l[-2]))\n if not return_two:\n ret = ret[0]\n return ret\n\nclass Config():\n data_path = './data/enronpa/'\n log_dir = 'runs/exp'\n save_path = './save'\n pretrained_embed_path = './embedding/'\n device = torch.device('cuda' if True and torch.cuda.is_available() else 'cpu')\n discriminator_method = 'Multi' # 'Multi' or 'Cond'\n load_pretrained_embed = False\n min_freq = 3\n max_length = 32\n embed_size = 256\n d_model = 256\n h = 4\n num_styles = 2\n num_classes = num_styles + 1 if discriminator_method == 'Multi' else 2\n num_layers = 4\n batch_size = 64\n lr_F = 0.0001\n lr_D = 0.0001\n L2 = 0\n iter_D = 10\n iter_F = 5\n F_pretrain_iter = 500\n log_steps = 5\n eval_steps = 25\n learned_pos_embed = True\n dropout = 0\n drop_rate_config = [(1, 0)]\n temperature_config = [(1, 0)]\n\n slf_factor = 0.25\n cyc_factor = 0.5\n adv_factor = 1\n\n inp_shuffle_len = 0\n inp_unk_drop_fac = 0\n inp_rand_drop_fac = 0\n inp_drop_prob = 0\n\n loss_log = './save/loss_log.txt'\n\ndef main():\n config = Config()\n train_iters, test_iters, vocab = load_enron(config)\n print('Vocab size:', len(vocab))\n model_F = StyleTransformer(config, vocab).to(config.device)\n model_D = Discriminator(config, vocab).to(config.device)\n print(config.discriminator_method)\n\n # last_checkpoint = most_recent_path(most_recent_path(config.save_path), return_two=True)\n # if last_checkpoint:\n # print(last_checkpoint)\n # model_D.load_state_dict(torch.load(last_checkpoint[1]))\n # model_F.load_state_dict(torch.load(last_checkpoint[0]))\n\n train(config, vocab, model_F, model_D, train_iters, test_iters)\n \nif __name__ == '__main__':\n main()", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2196, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "torch.device", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 28, "usage_type": "attribute"}, {"api_name": "data.load_enron", "line_number": 66, "usage_type": "call"}, {"api_name": "models.StyleTransformer", "line_number": 68, "usage_type": "call"}, {"api_name": "models.Discriminator", "line_number": 69, "usage_type": "call"}, {"api_name": "train.train", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "519170232", "text": "import sys,time\nimport numpy as np\nimport torch\nfrom tqdm import tqdm, trange\nfrom copy import deepcopy\n\nimport utils\nimport sys\nsys.path.append(\"./approaches/base/\")\nfrom cnn_base import Appr as ApprBase\nimport torch.nn.functional as F\n\n# adapt from https://github.com/joansj/hat/blob/master/src/approaches/sgd.py\n\n\nclass Appr(ApprBase):\n\n def __init__(self,model,args=None,taskcla=None,logger=None):\n 
super().__init__(model=model,logger=logger,taskcla=taskcla,args=args)\n\n\n print('CNN ONE')\n\n return\n\n\n def train(self,t,train,valid,num_train_steps,train_data,valid_data): #N-CL\n self.model=deepcopy(self.initial_model) # Restart model: isolate\n\n best_loss=np.inf\n best_model=utils.get_model(self.model)\n lr=self.lr\n patience=self.lr_patience\n self.optimizer=self._get_optimizer(lr)\n\n # Loop epochs\n for e in range(self.nepochs):\n # Train\n clock0=time.time()\n iter_bar = tqdm(train, desc='Train Iter (loss=X.XXX)')\n self.train_epoch(t,train,iter_bar)\n clock1=time.time()\n train_loss,train_acc,train_f1_macro=self.eval(t,train)\n clock2=time.time()\n print('| Epoch {:3d}, time={:5.1f}ms/{:5.1f}ms | Train: loss={:.3f}, acc={:5.1f}% |'.format(e+1,\n 1000*self.train_batch_size*(clock1-clock0)/len(train),1000*self.train_batch_size*(clock2-clock1)/len(train),train_loss,100*train_acc))\n # Valid\n valid_loss,valid_acc,valid_f1_macro=self.eval(t,valid)\n print(' Valid: loss={:.3f}, acc={:5.1f}% |'.format(valid_loss,100*valid_acc),end='')\n # Adapt lr\n if valid_loss.\"\"\"\n def __init__( self, refTo, fromLine=0 ):\n super().__init__( fromLine )\n self.refTo= refTo\n self.fullname= None\n self.sequenceList= None\n self.chunkList= []\n def __str__( self ):\n return \"at {!r}: reference to chunk {!r}\".format(self.lineNumber,self.refTo)\n \n def resolve( self, aWeb ):\n \"\"\"Expand our chunk name and list of parts\"\"\"\n self.fullName= aWeb.fullNameFor( self.refTo )\n self.chunkList= aWeb.getchunk( self.refTo )\n \n\n \n def ref( self, aWeb ):\n \"\"\"Find and return the full name for this reference.\"\"\"\n self.resolve( aWeb )\n return self.fullName\n \n\n \n def weave( self, aWeb, aWeaver ):\n \"\"\"Create the nicely formatted reference to a chunk of code.\"\"\"\n self.resolve( aWeb )\n aWeb.weaveChunk( self.fullName, aWeaver )\n \n\n \n def tangle( self, aWeb, aTangler ):\n \"\"\"Create source code.\"\"\"\n self.resolve( aWeb )\n \n self.logger.debug( \"Indent {!r} + {!r}\".format(aTangler.context, self.chunk.previous_command.indent()) ) \n self.chunk.reference_indent( aWeb, aTangler, self.chunk.previous_command.indent() )\n \n self.logger.debug( \"Tangling chunk {!r}\".format(self.fullName) )\n if len(self.chunkList) != 0:\n for p in self.chunkList:\n p.tangle( aWeb, aTangler )\n else:\n raise Error( \"Attempt to tangle an undefined Chunk, {!s}.\".format( self.fullName, ) )\n \n self.chunk.reference_dedent( aWeb, aTangler )\n \n\n\n\n\n\n\nclass Chunk:\n \"\"\"Anonymous piece of input file: will be output through the weaver only.\"\"\"\n # construction and insertion into the web\n def __init__( self ):\n self.commands= [ ] # The list of children of this chunk\n self.user_id_list= None\n self.initial= None\n self.name= ''\n self.fullName= None\n self.seq= None\n self.fileName= ''\n self.referencedBy= [] # Chunks which reference this chunk. Ideally just one.\n self.references= [] # Names that this chunk references\n \n def __str__( self ):\n return \"\\n\".join( map( str, self.commands ) )\n def __repr__( self ):\n return \"{!s}('{!s}')\".format( self.__class__.__name__, self.name )\n \n def append( self, command ):\n \"\"\"Add another Command to this chunk.\"\"\"\n self.commands.append( command )\n command.chunk= self\n \n\n \n def appendText( self, text, lineNumber=0 ):\n \"\"\"Append a single character to the most recent TextCommand.\"\"\"\n try:\n # Works for TextCommand, otherwise breaks\n self.commands[-1].text += text\n except IndexError as e:\n # First command? 
Then the list will have been empty.\n self.commands.append( self.makeContent(text,lineNumber) )\n except AttributeError as e:\n # Not a TextCommand? Then there won't be a text attribute.\n self.commands.append( self.makeContent(text,lineNumber) )\n \n\n \n def webAdd( self, web ):\n \"\"\"Add self to a Web as anonymous chunk.\"\"\"\n web.add( self )\n \n\n \n \n def genReferences( self, aWeb ):\n \"\"\"Generate references from this Chunk.\"\"\"\n try:\n for t in self.commands:\n ref= t.ref( aWeb )\n if ref is not None:\n yield ref\n except Error as e:\n raise\n \n\n \n def makeContent( self, text, lineNumber=0 ):\n return TextCommand( text, lineNumber )\n \n\n \n def startswith( self, prefix ):\n \"\"\"Examine the first command's starting text.\"\"\"\n return len(self.commands) >= 1 and self.commands[0].startswith( prefix )\n \n def searchForRE( self, rePat ):\n \"\"\"Visit each command, applying the pattern.\"\"\"\n for c in self.commands:\n if c.searchForRE( rePat ):\n return self\n return None\n \n @property\n def lineNumber( self ):\n \"\"\"Return the first command's line number or None.\"\"\"\n return self.commands[0].lineNumber if len(self.commands) >= 1 else None\n \n def getUserIDRefs( self ):\n return []\n \n\n \n def references_list( self, theWeaver ):\n \"\"\"Extract name, sequence from Chunks into a list.\"\"\"\n return [ (c.name, c.seq) \n for c in theWeaver.reference_style.chunkReferencedBy( self ) ]\n\n \n \n def weave( self, aWeb, aWeaver ):\n \"\"\"Create the nicely formatted document from an anonymous chunk.\"\"\"\n aWeaver.docBegin( self )\n for cmd in self.commands:\n cmd.weave( aWeb, aWeaver )\n aWeaver.docEnd( self )\n def weaveReferenceTo( self, aWeb, aWeaver ):\n \"\"\"Create a reference to this chunk -- except for anonymous chunks.\"\"\"\n raise Exception( \"Cannot reference an anonymous chunk.\"\"\")\n def weaveShortReferenceTo( self, aWeb, aWeaver ):\n \"\"\"Create a short reference to this chunk -- except for anonymous chunks.\"\"\"\n raise Exception( \"Cannot reference an anonymous chunk.\"\"\")\n \n\n \n def tangle( self, aWeb, aTangler ):\n \"\"\"Create source code -- except anonymous chunks should not be tangled\"\"\"\n raise Error( 'Cannot tangle an anonymous chunk', self )\n \n\n \n def reference_indent( self, aWeb, aTangler, amount ):\n aTangler.addIndent( amount ) # Or possibly set indent to local zero.\n \n def reference_dedent( self, aWeb, aTangler ):\n aTangler.clrIndent()\n\n\n\n\nclass NamedChunk( Chunk ):\n \"\"\"Named piece of input file: will be output as both tangler and weaver.\"\"\"\n def __init__( self, name ):\n super().__init__()\n self.name= name\n self.user_id_list= []\n self.refCount= 0\n def __str__( self ):\n return \"{!r}: {!s}\".format( self.name, Chunk.__str__(self) )\n def makeContent( self, text, lineNumber=0 ):\n return CodeCommand( text, lineNumber )\n \n def setUserIDRefs( self, text ):\n \"\"\"Save user ID's associated with this chunk.\"\"\"\n self.user_id_list= text.split()\n def getUserIDRefs( self ):\n return self.user_id_list\n \n\n \n def webAdd( self, web ):\n \"\"\"Add self to a Web as named chunk, update xrefs.\"\"\"\n web.addNamed( self )\n \n\n \n def weave( self, aWeb, aWeaver ):\n \"\"\"Create the nicely formatted document from a chunk of code.\"\"\"\n self.fullName= aWeb.fullNameFor( self.name )\n aWeaver.addIndent()\n aWeaver.codeBegin( self )\n for cmd in self.commands:\n cmd.weave( aWeb, aWeaver )\n aWeaver.clrIndent( )\n aWeaver.codeEnd( self )\n def weaveReferenceTo( self, aWeb, aWeaver ):\n \"\"\"Create a 
reference to this chunk.\"\"\"\n self.fullName= aWeb.fullNameFor( self.name )\n txt= aWeaver.referenceTo( self.fullName, self.seq )\n aWeaver.codeBlock( txt )\n def weaveShortReferenceTo( self, aWeb, aWeaver ):\n \"\"\"Create a shortened reference to this chunk.\"\"\"\n txt= aWeaver.referenceTo( None, self.seq )\n aWeaver.codeBlock( txt )\n \n\n \n def tangle( self, aWeb, aTangler ):\n \"\"\"Create source code.\n Use aWeb to resolve @.\n Format as correctly indented source text\n \"\"\"\n self.previous_command= TextCommand( \"\", self.commands[0].lineNumber )\n aTangler.codeBegin( self )\n for t in self.commands:\n try:\n t.tangle( aWeb, aTangler )\n except Error as e:\n raise\n self.previous_command= t\n aTangler.codeEnd( self )\n \n\n\n\nclass NamedChunk_Noindent( NamedChunk ):\n \"\"\"Named piece of input file: will be output as both tangler and weaver.\"\"\"\n def reference_indent( self, aWeb, aTangler, amount ):\n aTangler.setIndent( 0 )\n \n def reference_dedent( self, aWeb, aTangler ):\n aTangler.clrIndent()\n\n\nclass OutputChunk( NamedChunk ):\n \"\"\"Named piece of input file, defines an output tangle.\"\"\"\n def __init__( self, name, comment_start=None, comment_end=\"\" ):\n super().__init__( name )\n self.comment_start= comment_start\n self.comment_end= comment_end\n \n def webAdd( self, web ):\n \"\"\"Add self to a Web as output chunk, update xrefs.\"\"\"\n web.addOutput( self )\n \n\n \n def weave( self, aWeb, aWeaver ):\n \"\"\"Create the nicely formatted document from a chunk of code.\"\"\"\n self.fullName= aWeb.fullNameFor( self.name )\n aWeaver.fileBegin( self )\n for cmd in self.commands:\n cmd.weave( aWeb, aWeaver )\n aWeaver.fileEnd( self )\n \n\n \n def tangle( self, aWeb, aTangler ):\n aTangler.comment_start= self.comment_start\n aTangler.comment_end= self.comment_end\n super().tangle( aWeb, aTangler )\n\n\n\n\nclass NamedDocumentChunk( NamedChunk ):\n \"\"\"Named piece of input file with document source, defines an output tangle.\"\"\"\n def makeContent( self, text, lineNumber=0 ):\n return TextCommand( text, lineNumber )\n \n def weave( self, aWeb, aWeaver ):\n \"\"\"Ignore this when producing the document.\"\"\"\n pass\n def weaveReferenceTo( self, aWeb, aWeaver ):\n \"\"\"On a reference to this chunk, expand the body in place.\"\"\"\n for cmd in self.commands:\n cmd.weave( aWeb, aWeaver )\n def weaveShortReferenceTo( self, aWeb, aWeaver ):\n \"\"\"On a reference to this chunk, expand the body in place.\"\"\"\n self.weaveReferenceTo( aWeb, aWeaver )\n \n\n \n def tangle( self, aWeb, aTangler ):\n \"\"\"Raise an exception on an attempt to tangle.\"\"\"\n raise Error( \"Cannot tangle a chunk defined with @[.\"\"\" )\n \n\n\n\n\n\nclass Web:\n \"\"\"The overall Web of chunks.\"\"\"\n def __init__( self ):\n self.webFileName= None\n self.chunkSeq= [] \n self.output= {} # Map filename to Chunk\n self.named= {} # Map chunkname to Chunk\n self.sequence= 0\n self.logger= logging.getLogger( self.__class__.__qualname__ )\n def __str__( self ):\n return \"Web {!r}\".format( self.webFileName, )\n\n \n \n def addDefName( self, name ):\n \"\"\"Reference to or definition of a chunk name.\"\"\"\n nm= self.fullNameFor( name )\n if nm is None: return None\n if nm[-3:] == '...':\n self.logger.debug( \"Abbreviated reference {!r}\".format(name) )\n return None # first occurance is a forward reference using an abbreviation\n if nm not in self.named:\n self.named[nm]= []\n self.logger.debug( \"Adding empty chunk {!r}\".format(name) )\n return nm\n \n \n \n def add( self, chunk ):\n 
\"\"\"Add an anonymous chunk.\"\"\"\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n \n \n \n def addNamed( self, chunk ):\n \"\"\"Add a named chunk to a sequence with a given name.\"\"\"\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n nm= self.addDefName( chunk.name )\n if nm:\n # We found the full name for this chunk\n self.sequence += 1\n chunk.seq= self.sequence\n chunk.fullName= nm\n self.named[nm].append( chunk )\n chunk.initial= len(self.named[nm]) == 1\n self.logger.debug( \"Extending chunk {!r} from {!r}\".format(nm, chunk.name) )\n else:\n raise Error(\"No full name for {!r}\".format(chunk.name), chunk)\n \n \n \n def addOutput( self, chunk ):\n \"\"\"Add an output chunk to a sequence with a given name.\"\"\"\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n if chunk.name not in self.output:\n self.output[chunk.name] = []\n self.logger.debug( \"Adding chunk {!r}\".format(chunk.name) )\n self.sequence += 1\n chunk.seq= self.sequence\n chunk.fullName= chunk.name\n self.output[chunk.name].append( chunk )\n chunk.initial = len(self.output[chunk.name]) == 1\n \n \n\n \n def fullNameFor( self, name ):\n \"\"\"Resolve \"...\" names into the full name.\"\"\"\n if name in self.named: return name\n if name[-3:] == '...':\n best= [ n for n in self.named.keys()\n if n.startswith( name[:-3] ) ]\n if len(best) > 1:\n raise Error(\"Ambiguous abbreviation {!r}, matches {!r}\".format( name, list(sorted(best)) ) )\n elif len(best) == 1: \n return best[0]\n return name\n \n \n def getchunk( self, name ):\n \"\"\"Locate a named sequence of chunks.\"\"\"\n nm= self.fullNameFor( name )\n if nm in self.named:\n return self.named[nm]\n raise Error( \"Cannot resolve {!r} in {!r}\".format(name,self.named.keys()) )\n \n\n \n def createUsedBy( self ):\n \"\"\"Update every piece of a Chunk to show how the chunk is referenced.\n Each piece can then report where it's used in the web.\n \"\"\"\n for aChunk in self.chunkSeq:\n #usage = (self.fullNameFor(aChunk.name), aChunk.seq)\n for aRefName in aChunk.genReferences( self ):\n for c in self.getchunk( aRefName ):\n c.referencedBy.append( aChunk )\n c.refCount += 1\n \n for nm in self.no_reference():\n self.logger.warn( \"No reference to {!r}\".format(nm) )\n for nm in self.multi_reference():\n self.logger.warn( \"Multiple references to {!r}\".format(nm) )\n for nm in self.no_definition():\n self.logger.error( \"No definition for {!r}\".format(nm) )\n self.errors += 1\n \n \n \n def no_reference( self ):\n return [ nm for nm,cl in self.named.items() if len(cl)>0 and cl[0].refCount == 0 ]\n def multi_reference( self ):\n return [ nm for nm,cl in self.named.items() if len(cl)>0 and cl[0].refCount > 1 ]\n def no_definition( self ):\n return [ nm for nm,cl in self.named.items() if len(cl) == 0 ] \n \n \n def fileXref( self ):\n fx= {}\n for f,cList in self.output.items():\n fx[f]= [ c.seq for c in cList ]\n return fx\n def chunkXref( self ):\n mx= {}\n for n,cList in self.named.items():\n mx[n]= [ c.seq for c in cList ]\n return mx\n \n \n def userNamesXref( self ):\n ux= {}\n self._gatherUserId( self.named, ux )\n self._gatherUserId( self.output, ux )\n self._updateUserId( self.named, ux )\n self._updateUserId( self.output, ux )\n return ux\n def _gatherUserId( self, chunkMap, ux ):\n \n for n,cList in chunkMap.items():\n for c in cList:\n for id in c.getUserIDRefs():\n ux[id]= ( c.seq, [] )\n \n def _updateUserId( self, chunkMap, ux ):\n \n # examine source for occurrences of all names in ux.keys()\n for id in 
ux.keys():\n self.logger.debug( \"References to {!r}\".format(id) )\n idpat= re.compile( r'\\W{!s}\\W'.format(id) )\n for n,cList in chunkMap.items():\n for c in cList:\n if c.seq != ux[id][0] and c.searchForRE( idpat ):\n ux[id][1].append( c.seq )\n \n \n\n \n def language( self, preferredWeaverClass=None ):\n \"\"\"Construct a weaver appropriate to the document's language\"\"\"\n if preferredWeaverClass:\n return preferredWeaverClass()\n self.logger.debug( \"Picking a weaver based on first chunk {!r}\".format(self.chunkSeq[0][:4]) )\n if self.chunkSeq[0].startswith('<'): \n return HTML()\n if self.chunkSeq[0].startswith('%') or self.chunkSeq[0].startswith('\\\\'): \n return LaTeX()\n return RST()\n \n\n \n def tangle( self, aTangler ):\n for f, c in self.output.items():\n with aTangler.open(f):\n for p in c:\n p.tangle( self, aTangler )\n \n\n \n def weave( self, aWeaver ):\n self.logger.debug( \"Weaving file from {!r}\".format(self.webFileName) )\n basename, _ = os.path.splitext( self.webFileName )\n with aWeaver.open(basename):\n for c in self.chunkSeq:\n c.weave( self, aWeaver )\n def weaveChunk( self, name, aWeaver ):\n self.logger.debug( \"Weaving chunk {!r}\".format(name) )\n chunkList= self.getchunk(name)\n if not chunkList:\n raise Error( \"No Definition for {!r}\".format(name) )\n chunkList[0].weaveReferenceTo( self, aWeaver )\n for p in chunkList[1:]:\n aWeaver.write( aWeaver.referenceSep() )\n p.weaveShortReferenceTo( self, aWeaver )\n \n\n\n\n\nclass Tokenizer:\n def __init__( self, stream, command_char='@' ):\n self.command= command_char\n self.parsePat= re.compile( r'({!s}.|\\n)'.format(self.command) )\n self.token_iter= (t for t in self.parsePat.split( stream.read() ) if len(t) != 0)\n self.lineNumber= 0\n def __next__( self ):\n token= next(self.token_iter)\n self.lineNumber += token.count('\\n')\n return token\n def __iter__( self ):\n return self\n\n\n\nclass OptionDef:\n def __init__( self, name, **kw ):\n self.name= name\n self.__dict__.update( kw )\n\nclass OptionParser:\n def __init__( self, *arg_defs ):\n self.args= dict( (arg.name,arg) for arg in arg_defs )\n self.trailers= [k for k in self.args.keys() if not k.startswith('-')]\n def parse( self, text ):\n try:\n word_iter= iter(shlex.split(text))\n except ValueError as e:\n raise Error( \"Error parsing options in {!r}\".format(text) )\n options = dict( s for s in self._group( word_iter ) )\n return options\n def _group( self, word_iter ):\n option, value, final= None, [], []\n for word in word_iter:\n if word == '--':\n if option:\n yield option, value\n try:\n final= [next(word_iter)] \n except StopIteration:\n final= [] # Special case of '--' at the end.\n break\n elif word.startswith('-'):\n if word in self.args:\n if option: \n yield option, value\n option, value = word, []\n else:\n raise ParseError( \"Unknown option {0}\".format(word) )\n else:\n if option:\n if self.args[option].nargs == len(value):\n yield option, value\n final= [word]\n break\n else: \n value.append( word )\n else:\n final= [word]\n break\n # In principle, we step through the trailers based on nargs counts.\n for word in word_iter:\n final.append( word )\n yield self.trailers[0], \" \".join(final)\n\n\nclass WebReader:\n \"\"\"Parse an input file, creating Chunks and Commands.\"\"\"\n\n output_option_parser= OptionParser(\n OptionDef( \"-start\", nargs=1, default=None ),\n OptionDef( \"-end\", nargs=1, default=\"\" ),\n OptionDef( \"argument\", nargs='*' ),\n )\n\n definition_option_parser= OptionParser(\n OptionDef( \"-indent\", nargs=0 
),\n        OptionDef( \"-noindent\", nargs=0 ),\n        OptionDef( \"argument\", nargs='*' ),\n    )\n\n    def __init__( self, parent=None ):\n        self.logger= logging.getLogger( self.__class__.__qualname__ )\n\n        # Configuration of this reader.\n        self.parent= parent\n        if self.parent: \n            self.command= self.parent.command\n            self.permitList= self.parent.permitList\n        else: # Defaults until overridden\n            self.command= '@'\n            self.permitList= []\n\n        # Load options\n        self._source= None\n        self.fileName= None\n        self.theWeb= None\n        \n        # State of reading and parsing.\n        self.tokenizer= None\n        self.aChunk= None\n        \n        # Summary\n        self.totalLines= 0\n        self.totalFiles= 0\n        self.errors= 0 \n        \n        \n        # Structural (\"major\") commands\n        self.cmdo= self.command+'o'\n        self.cmdd= self.command+'d'\n        self.cmdlcurl= self.command+'{'\n        self.cmdrcurl= self.command+'}'\n        self.cmdlbrak= self.command+'['\n        self.cmdrbrak= self.command+']'\n        self.cmdi= self.command+'i'\n        \n        # Inline (\"minor\") commands\n        self.cmdlangl= self.command+'<'\n        self.cmdrangl= self.command+'>'\n        self.cmdpipe= self.command+'|'\n        self.cmdlexpr= self.command+'('\n        self.cmdrexpr= self.command+')'\n        self.cmdcmd= self.command+self.command\n        \n        # Content \"minor\" commands\n        self.cmdf= self.command+'f'\n        self.cmdm= self.command+'m'\n        self.cmdu= self.command+'u'\n\n    def __str__( self ):\n        return self.__class__.__name__\n    \n    def location( self ):\n        return (self.fileName, self.tokenizer.lineNumber+1)\n    \n\n    \n    def load( self, web, filename, source=None ):\n        self.theWeb= web\n        self.fileName= filename\n        \n        # Only set the web filename once, using the first file.\n        # This should be a setter property of the web.\n        if self.theWeb.webFileName is None:\n            self.theWeb.webFileName= self.fileName\n        \n        if source:\n            self._source= source\n            self.parse_source()\n        else:\n            with open( self.fileName, \"r\" ) as self._source:\n                self.parse_source()\n    \n    def parse_source( self ):\n        self.tokenizer= Tokenizer( self._source, self.command )\n        self.totalFiles += 1\n        \n        self.aChunk= Chunk() # Initial anonymous chunk of text.\n        self.aChunk.webAdd( self.theWeb )\n        \n        for token in self.tokenizer:\n            if len(token) >= 2 and token.startswith(self.command):\n                if self.handleCommand( token ):\n                    continue\n                else:\n                    self.logger.warning( 'Unknown @-command in input: {!r}'.format(token) )\n                    self.aChunk.appendText( token, self.tokenizer.lineNumber )\n            elif token:\n                # Accumulate a non-empty block of text in the current chunk.\n                self.aChunk.appendText( token, self.tokenizer.lineNumber )\n        \n\n    \n    def handleCommand( self, token ):\n        self.logger.debug( \"Reading {!r}\".format(token) )\n        \n        if token[:2] == self.cmdo:\n            \n            args= next(self.tokenizer)\n            self.expect( (self.cmdlcurl,) )\n            options= self.output_option_parser.parse( args )\n            self.aChunk= OutputChunk( name=options['argument'],\n                    comment_start= options.get('start',None),\n                    comment_end= options.get('end',\"\"),\n                    )\n            self.aChunk.fileName= self.fileName \n            self.aChunk.webAdd( self.theWeb )\n            # capture an OutputChunk up to @}\n            \n        elif token[:2] == self.cmdd:\n            \n            args= next(self.tokenizer)\n            brack= self.expect( (self.cmdlcurl,self.cmdlbrak) )\n            options= self.definition_option_parser.parse( args )\n            name=options['argument']\n            \n            if brack == self.cmdlbrak:\n                self.aChunk= NamedDocumentChunk( name )\n            elif brack == self.cmdlcurl:\n                if '-noindent' in options:\n                    self.aChunk= NamedChunk_Noindent( name )\n                else:\n                    self.aChunk= NamedChunk( name )\n            elif brack is None:\n                pass # Error noted by expect()\n            else:\n                raise Error( \"Design Error\" )\n            \n            self.aChunk.fileName= self.fileName \n            
self.aChunk.webAdd( self.theWeb )\n            # capture a NamedChunk up to @} or @]\n            \n        elif token[:2] == self.cmdi:\n            \n            incFile= next(self.tokenizer).strip()\n            try:\n                self.logger.info( \"Including {!r}\".format(incFile) )\n                include= WebReader( parent=self )\n                include.load( self.theWeb, incFile )\n                self.totalLines += include.tokenizer.lineNumber\n                self.totalFiles += include.totalFiles\n                if include.errors:\n                    self.errors += include.errors\n                    self.logger.error( \n                        \"Errors in included file {!s}, output is incomplete.\".format(\n                        incFile) )\n            except Error as e:\n                self.logger.error( \n                    \"Problems with included file {!s}, output is incomplete.\".format(\n                    incFile) )\n                self.errors += 1\n            except IOError as e:\n                self.logger.error( \n                    \"Problems with included file {!s}, output is incomplete.\".format(\n                    incFile) )\n                # Discretionary -- sometimes we want to continue\n                if self.cmdi in self.permitList: pass\n                else: raise # TODO: Seems heavy-handed\n            self.aChunk= Chunk()\n            self.aChunk.webAdd( self.theWeb )\n            \n        elif token[:2] in (self.cmdrcurl,self.cmdrbrak):\n            \n            self.aChunk= Chunk()\n            self.aChunk.webAdd( self.theWeb )\n            \n            \n            \n        elif token[:2] == self.cmdpipe:\n            \n            try:\n                self.aChunk.setUserIDRefs( next(self.tokenizer).strip() )\n            except AttributeError:\n                # Out of place @| user identifier command\n                self.logger.error( \"Unexpected references near {!s}: {!s}\".format(self.location(),token) )\n                self.errors += 1\n            \n        elif token[:2] == self.cmdf:\n            self.aChunk.append( FileXrefCommand(self.tokenizer.lineNumber) )\n        elif token[:2] == self.cmdm:\n            self.aChunk.append( MacroXrefCommand(self.tokenizer.lineNumber) )\n        elif token[:2] == self.cmdu:\n            self.aChunk.append( UserIdXrefCommand(self.tokenizer.lineNumber) )\n        elif token[:2] == self.cmdlangl:\n            \n            # get the name, introduce into the named Chunk dictionary\n            expand= next(self.tokenizer).strip()\n            closing= self.expect( (self.cmdrangl,) )\n            self.theWeb.addDefName( expand )\n            self.aChunk.append( ReferenceCommand( expand, self.tokenizer.lineNumber ) )\n            self.aChunk.appendText( \"\", self.tokenizer.lineNumber ) # to collect following text\n            self.logger.debug( \"Reading {!r} {!r}\".format(expand, closing) )\n            \n        elif token[:2] == self.cmdlexpr:\n            \n            # get the Python expression, create the expression result\n            expression= next(self.tokenizer)\n            self.expect( (self.cmdrexpr,) )\n            try:\n                # Build Context\n                safe= types.SimpleNamespace( **dict( (name,obj) \n                    for name,obj in builtins.__dict__.items() \n                    if name not in ('eval', 'exec', 'open', '__import__')))\n                globals= dict(\n                    __builtins__= safe, \n                    os= types.SimpleNamespace(path=os.path),\n                    datetime= datetime,\n                    platform= platform,\n                    theLocation= self.location(),\n                    theWebReader= self,\n                    theFile= self.theWeb.webFileName,\n                    thisApplication= sys.argv[0],\n                    __version__= __version__,\n                )\n                # Evaluate\n                result= str(eval(expression, globals))\n            except Exception as e:\n                self.logger.error( 'Failure to process {!r}: result is {!r}'.format(expression, e) )\n                self.errors += 1\n                result= \"@({!r}: Error {!r}@)\".format(expression, e)\n            self.aChunk.appendText( result, self.tokenizer.lineNumber )\n            \n        elif token[:2] == self.cmdcmd:\n            \n            self.aChunk.appendText( self.command, self.tokenizer.lineNumber )\n            \n            \n        elif token[:2] in (self.cmdlcurl,self.cmdlbrak):\n            # These should have been consumed as part of @o and @d parsing\n            self.logger.error( \"Extra {!r} (possibly missing chunk name) near {!r}\".format(token, self.location()) )\n            self.errors += 1\n        else:\n            return None # did not recognize the command\n        return True # did recognize the command\n    \n    \n    
def expect( self, tokens ):\n try:\n t= next(self.tokenizer)\n while t == '\\n':\n t= next(self.tokenizer)\n except StopIteration:\n self.logger.error( \"At {!r}: end of input, {!r} not found\".format(self.location(),tokens) )\n self.errors += 1\n return\n if t not in tokens:\n self.logger.error( \"At {!r}: expected {!r}, found {!r}\".format(self.location(),tokens,t) )\n self.errors += 1\n return\n return t\n \n\n\n\n\nclass Emitter:\n \"\"\"Emit an output file; handling indentation context.\"\"\"\n code_indent= 0 # Used by a Tangler\n def __init__( self ):\n self.fileName= \"\"\n self.theFile= None\n self.linesWritten= 0\n self.totalFiles= 0\n self.totalLines= 0\n self.fragment= False\n self.logger= logging.getLogger( self.__class__.__qualname__ )\n self.log_indent= logging.getLogger( \"indent.\" + self.__class__.__qualname__ )\n self.readdIndent( self.code_indent ) # Create context and initial lastIndent values\n def __str__( self ):\n return self.__class__.__name__\n \n def open( self, aFile ):\n \"\"\"Open a file.\"\"\"\n self.fileName= aFile\n self.linesWritten= 0\n self.doOpen( aFile )\n return self\n \n def doOpen( self, aFile ):\n self.logger.debug( \"creating {!r}\".format(self.fileName) )\n \n \n def close( self ):\n self.codeFinish() # Trailing newline for tangler only.\n self.doClose()\n self.totalFiles += 1\n self.totalLines += self.linesWritten\n \n def doClose( self ):\n self.logger.debug( \"wrote {:d} lines to {!s}\".format(\n self.linesWritten, self.fileName) )\n \n \n def write( self, text ):\n if text is None: return\n self.linesWritten += text.count('\\n')\n self.theFile.write( text )\n \n # Context Manager\n def __enter__( self ):\n return self\n def __exit__( self, *exc ):\n self.close()\n return False\n \n \n\n \n def codeBlock( self, text ):\n \"\"\"Indented write of a block of code. 
We buffer\n        the spaces from the last line to act as the indent for the next line.\n        \"\"\"\n        indent= self.context[-1]\n        lines= text.split( '\\n' )\n        if len(lines) == 1: # Fragment with no newline.\n            self.write('{!s}{!s}'.format(self.lastIndent*' ', lines[0]) )\n            self.lastIndent= 0\n            self.fragment= True\n        else:\n            first, rest= lines[:1], lines[1:]\n            self.write('{!s}{!s}\\n'.format(self.lastIndent*' ', first[0]) )\n            for l in rest[:-1]:\n                self.write( '{!s}{!s}\\n'.format(indent*' ', l) )\n            if rest[-1]:\n                self.write( '{!s}{!s}'.format(indent*' ', rest[-1]) )\n                self.lastIndent= 0\n                self.fragment= True\n            else:\n                # Buffer a next indent\n                self.lastIndent= len(rest[-1]) + indent\n                self.fragment= False\n    \n    \n    quoted_chars = [\n        # Must be empty for tangling.\n    ]\n    \n    def quote( self, aLine ):\n        \"\"\"Each individual line of code; often overridden by weavers to quote the code.\"\"\"\n        clean= aLine\n        for from_, to_ in self.quoted_chars:\n            clean= clean.replace( from_, to_ )\n        return clean\n    \n    \n    def codeFinish( self ):\n        if self.fragment:\n            self.write('\\n')\n    \n\n    \n    def addIndent( self, increment ):\n        self.lastIndent= self.context[-1]+increment\n        self.context.append( self.lastIndent )\n        self.log_indent.debug( \"addIndent {!s}: {!r}\".format(increment, self.context) )\n    def setIndent( self, indent ):\n        self.lastIndent= self.context[-1]\n        self.context.append( indent )\n        self.log_indent.debug( \"setIndent {!s}: {!r}\".format(indent, self.context) )\n    def clrIndent( self ):\n        if len(self.context) > 1:\n            self.context.pop()\n        self.lastIndent= self.context[-1]\n        self.log_indent.debug( \"clrIndent {!r}\".format(self.context) )\n    def readdIndent( self, indent=0 ):\n        self.lastIndent= indent\n        self.context= [self.lastIndent]\n        self.log_indent.debug( \"readdIndent {!s}: {!r}\".format(indent, self.context) )\n    \n\n\n\n\nclass Weaver( Emitter ):\n    \"\"\"Format various types of XRef's and code blocks when weaving.\n    RST format. \n    Requires ``.. include:: <isoamsa.txt>``\n    and ``.. include:: <isopub.txt>``\n    \"\"\"\n    extension= \".rst\" \n    code_indent= 4\n    header= \"\"\"\\n.. include:: <isoamsa.txt>\\n.. include:: <isopub.txt>\\n\"\"\"\n    \n    def __init__( self ):\n        super().__init__()\n        self.reference_style= None # Must be configured.\n    \n    \n    def doOpen( self, basename ):\n        self.fileName= basename + self.extension\n        self.logger.info( \"Weaving {!r}\".format(self.fileName) )\n        self.theFile= open( self.fileName, \"w\" )\n        self.readdIndent( self.code_indent )\n    def doClose( self ):\n        self.theFile.close()\n        self.logger.info( \"Wrote {:d} lines to {!r}\".format(\n            self.linesWritten, self.fileName) )\n    def addIndent( self, increment=0 ):\n        \"\"\"increment not used when weaving\"\"\"\n        self.context.append( self.context[-1] )\n        self.log_indent.debug( \"addIndent {!s}: {!r}\".format(self.lastIndent, self.context) )\n    def codeFinish( self ):\n        pass # Not needed when weaving\n    \n\n    \n    # Template Expansions.\n    \n    \n    quoted_chars = [\n        # prevent some RST markup from being recognized\n        ('\\\\',r'\\\\'), # Must be first.\n        ('`',r'\\`'),\n        ('_',r'\\_'), \n        ('*',r'\\*'),\n        ('|',r'\\|'),\n    ]\n\n    \n    def docBegin( self, aChunk ):\n        pass\n    def docEnd( self, aChunk ):\n        pass\n    \n\n    \n    ref_template = string.Template( \"${refList}\" )\n    ref_separator = \"; \"\n    ref_item_template = string.Template( \"$fullName (`${seq}`_)\" )\n    def references( self, aChunk ):\n        references= aChunk.references_list( self )\n        if len(references) != 0:\n            refList= [ \n                self.ref_item_template.substitute( seq=s, fullName=n )\n                for n,s in references ]\n            return self.ref_template.substitute( refList=self.ref_separator.join( refList ) )\n        else:\n            return \"\"\n    \n\n    \n    cb_template = string.Template( \"\\n.. _`${seq}`:\\n.. rubric:: ${fullName} (${seq}) ${concat}\\n.. parsed-literal::\\n    :class: code\\n\\n\" )\n    \n    def codeBegin( self, aChunk ):\n        txt = self.cb_template.substitute( \n            seq= aChunk.seq,\n            lineNumber= aChunk.lineNumber, \n            fullName= aChunk.fullName,\n            concat= \"=\" if aChunk.initial else \"+=\", # RST Separator\n        )\n        self.write( txt )\n    \n    ce_template = string.Template( \"\\n..\\n\\n    .. class:: small\\n\\n    |loz| *${fullName} (${seq})*. Used by: ${references}\\n\" )\n    \n    def codeEnd( self, aChunk ):\n        txt = self.ce_template.substitute( \n            seq= aChunk.seq,\n            lineNumber= aChunk.lineNumber, \n            fullName= aChunk.fullName,\n            references= self.references( aChunk ),\n        )\n        self.write(txt)\n    \n\n    \n    fb_template = string.Template( \"\\n.. _`${seq}`:\\n.. rubric:: ${fullName} (${seq}) ${concat}\\n.. parsed-literal::\\n    :class: code\\n\\n\" )\n    \n    def fileBegin( self, aChunk ):\n        txt= self.fb_template.substitute(\n            seq= aChunk.seq, \n            lineNumber= aChunk.lineNumber, \n            fullName= aChunk.fullName,\n            concat= \"=\" if aChunk.initial else \"+=\", # RST Separator\n        )\n        self.write( txt )\n    \n    fe_template= string.Template( \"\\n..\\n\\n    .. 
class:: small\\n\\n |loz| *${fullName} (${seq})*.\\n\" )\n \n def fileEnd( self, aChunk ):\n assert len(self.references( aChunk )) == 0\n txt= self.fe_template.substitute(\n seq= aChunk.seq, \n lineNumber= aChunk.lineNumber, \n fullName= aChunk.fullName,\n references= [] )\n self.write( txt )\n \n\n \n refto_name_template= string.Template(r\"|srarr|\\ ${fullName} (`${seq}`_)\")\n refto_seq_template= string.Template(\"|srarr|\\ (`${seq}`_)\")\n refto_seq_separator= \", \"\n \n def referenceTo( self, aName, seq ):\n \"\"\"Weave a reference to a chunk.\n Provide name to get a full reference.\n name=None to get a short reference.\"\"\"\n if aName:\n return self.refto_name_template.substitute( fullName= aName, seq= seq )\n else:\n return self.refto_seq_template.substitute( seq= seq )\n \n def referenceSep( self ):\n \"\"\"Separator between references.\"\"\"\n return self.refto_seq_separator\n \n\n \n xref_head_template = string.Template( \"\\n\" )\n xref_foot_template = string.Template( \"\\n\" )\n xref_item_template = string.Template( \":${fullName}:\\n ${refList}\\n\" )\n xref_empty_template = string.Template( \"(None)\\n\" )\n \n def xrefHead( self ):\n txt = self.xref_head_template.substitute()\n self.write( txt )\n \n def xrefFoot( self ):\n txt = self.xref_foot_template.substitute()\n self.write( txt )\n \n def xrefLine( self, name, refList ):\n refList= [ self.referenceTo( None, r ) for r in refList ]\n txt= self.xref_item_template.substitute( fullName= name, refList = \" \".join(refList) ) # RST Separator\n self.write( txt )\n \n def xrefEmpty( self ):\n self.write( self.xref_empty_template.substitute() )\n \n name_def_template = string.Template( '[`${seq}`_]' )\n name_ref_template = string.Template( '`${seq}`_' )\n \n def xrefDefLine( self, name, defn, refList ):\n templates = { defn: self.name_def_template }\n refTxt= [ templates.get(r,self.name_ref_template).substitute( seq= r )\n for r in sorted( refList + [defn] ) \n ]\n # Generic space separator\n txt= self.xref_item_template.substitute( fullName= name, refList = \" \".join(refTxt) ) \n self.write( txt )\n \n\n\n\n\nclass RST(Weaver):\n pass\n\n\nclass LaTeX( Weaver ):\n \"\"\"LaTeX formatting for XRef's and code blocks when weaving.\n Requires \\\\usepackage{fancyvrb}\n \"\"\"\n extension= \".tex\"\n code_indent= 0\n header= \"\"\"\\n\\\\usepackage{fancyvrb}\\n\"\"\"\n\n \n cb_template = string.Template( \"\"\"\\\\label{pyweb${seq}}\n \\\\begin{flushleft}\n \\\\textit{Code example ${fullName} (${seq})}\n \\\\begin{Verbatim}[commandchars=\\\\\\\\\\\\{\\\\},codes={\\\\catcode`$$=3\\\\catcode`^=7},frame=single]\\n\"\"\") # Prevent indent\n \n\n \n ce_template= string.Template(\"\"\"\n \\\\end{Verbatim}\n ${references}\n \\\\end{flushleft}\\n\"\"\") # Prevent indentation\n \n\n \n fb_template= cb_template\n \n\n \n fe_template= ce_template\n \n\n \n ref_item_template = string.Template( \"\"\"\n \\\\item Code example ${fullName} (${seq}) (Sect. \\\\ref{pyweb${seq}}, p. 
\\\\pageref{pyweb${seq}})\\n\"\"\")\n ref_template = string.Template( \"\"\"\n \\\\footnotesize\n Used by:\n \\\\begin{list}{}{}\n ${refList}\n \\\\end{list}\n \\\\normalsize\\n\"\"\")\n \n\n \n quoted_chars = [\n (\"\\\\end{Verbatim}\", \"\\\\end\\,{Verbatim}\"), # Allow \\end{Verbatim}\n (\"\\\\{\",\"\\\\\\,{\"), # Prevent unexpected commands in Verbatim\n (\"$\",\"\\\\$\"), # Prevent unexpected math in Verbatim\n ]\n \n\n \n refto_name_template= string.Template(\"\"\"$$\\\\triangleright$$ Code Example ${fullName} (${seq})\"\"\")\n refto_seq_template= string.Template(\"\"\"(${seq})\"\"\")\n \n\n\n\n\nclass HTML( Weaver ):\n \"\"\"HTML formatting for XRef's and code blocks when weaving.\"\"\"\n extension= \".html\"\n code_indent= 0\n header= \"\"\n \n cb_template= string.Template(\"\"\"\n \n \n

<a name=\"pyweb${seq}\"></a>\n    <!--line number ${lineNumber}-->\n    <p><em>${fullName}</em> (${seq})&nbsp;${concat}</p>\n    <pre><code>\\n\"\"\")\n    \n\n        \n    ce_template= string.Template(\"\"\"\n    </code></pre>\n    <p>&loz; <em>${fullName}</em> (${seq}).\n    ${references}\n    </p>\\n\"\"\")\n    \n\n        \n    fb_template= string.Template(\"\"\"\n    <a name=\"pyweb${seq}\"></a>\n    <!--line number ${lineNumber}-->\n    <p>``${fullName}`` (${seq})&nbsp;${concat}</p>\n    <pre><code>\\n\"\"\") # Prevent indent\n    \n\n        \n    fe_template= string.Template( \"\"\"</code></pre>\n    <p>&loz; ``${fullName}`` (${seq}).\n    ${references}\n    </p>\\n\"\"\")\n    \n\n        \n    ref_item_template = string.Template(\n        '<a href=\"#pyweb${seq}\"><em>${fullName}</em>&nbsp;(${seq})</a>'\n        )\n    ref_template = string.Template( '  Used by ${refList}.' )\n    \n\n        \n    quoted_chars = [\n        (\"&\", \"&amp;\"), # Must be first\n        (\"<\", \"&lt;\"),\n        (\">\", \"&gt;\"),\n        ('\"', \"&quot;\"),\n    ]\n    \n\n        \n    refto_name_template = string.Template(\n        '<a href=\"#pyweb${seq}\"><em>${fullName}</em>&nbsp;(${seq})</a>'\n        )\n    refto_seq_template = string.Template(\n        '<a href=\"#pyweb${seq}\">(${seq})</a>'\n        )\n    \n\n        \n    xref_head_template = string.Template( \"<dl>\\n\" )\n    xref_foot_template = string.Template( \"</dl>\\n\" )\n    xref_item_template = string.Template( \"<dt>${fullName}</dt><dd>${refList}</dd>
\\n\" )\n \n name_def_template = string.Template( '•${seq}' )\n name_ref_template = string.Template( '${seq}' )\n \n \n \n\n\n\nclass HTMLShort( HTML ):\n \"\"\"HTML formatting for XRef's and code blocks when weaving with short references.\"\"\"\n \n ref_item_template = string.Template( '(${seq})' )\n \n\n\n\n\nclass Tangler( Emitter ):\n \"\"\"Tangle output files.\"\"\"\n def __init__( self ):\n super().__init__()\n self.comment_start= None\n self.comment_end= \"\"\n self.include_line_numbers= False\n \n def checkPath( self ):\n if \"/\" in self.fileName:\n dirname, _, _ = self.fileName.rpartition(\"/\")\n try:\n os.makedirs( dirname )\n self.logger.info( \"Creating {!r}\".format(dirname) )\n except OSError as e:\n # Already exists. Could check for errno.EEXIST.\n self.logger.debug( \"Exception {!r} creating {!r}\".format(e, dirname) )\n def doOpen( self, aFile ):\n self.fileName= aFile\n self.checkPath()\n self.theFile= open( aFile, \"w\" )\n self.logger.info( \"Tangling {!r}\".format(aFile) )\n def doClose( self ):\n self.theFile.close()\n self.logger.info( \"Wrote {:d} lines to {!r}\".format(\n self.linesWritten, self.fileName) )\n \n\n \n def codeBegin( self, aChunk ):\n self.log_indent.debug( \"{!s}\".format(aChunk.fullName) )\n \n\n\n\n\nclass TanglerMake( Tangler ):\n \"\"\"Tangle output files, leaving files untouched if there are no changes.\"\"\"\n def __init__( self, *args ):\n super().__init__( *args )\n self.tempname= None\n \n def doOpen( self, aFile ):\n fd, self.tempname= tempfile.mkstemp( dir=os.curdir )\n self.theFile= os.fdopen( fd, \"w\" )\n self.logger.info( \"Tangling {!r}\".format(aFile) )\n \n\n \n def doClose( self ):\n self.theFile.close()\n try:\n same= filecmp.cmp( self.tempname, self.fileName )\n except OSError as e:\n same= False # Doesn't exist. Could check for errno.ENOENT\n if same:\n self.logger.info( \"No change to {!r}\".format(self.fileName) )\n os.remove( self.tempname )\n else:\n # Windows requires the original file name be removed first.\n self.checkPath()\n try: \n os.remove( self.fileName )\n except OSError as e:\n pass # Doesn't exist. 
Could check for errno.ENOENT\n os.rename( self.tempname, self.fileName )\n self.logger.info( \"Wrote {:d} lines to {!r}\".format(\n self.linesWritten, self.fileName) )\n \n\n\n\n\n\nclass Reference:\n def __init__( self ):\n self.logger= logging.getLogger( self.__class__.__qualname__ )\n def chunkReferencedBy( self, aChunk ):\n \"\"\"Return a list of Chunks.\"\"\"\n pass\n\nclass SimpleReference( Reference ):\n def chunkReferencedBy( self, aChunk ):\n refBy= aChunk.referencedBy\n return refBy\n\nclass TransitiveReference( Reference ):\n def chunkReferencedBy( self, aChunk ):\n refBy= aChunk.referencedBy\n self.logger.debug( \"References: {!s}({:d}) {!r}\".format(aChunk.name, aChunk.seq, refBy) )\n return self.allParentsOf( refBy )\n def allParentsOf( self, chunkList, depth=0 ):\n \"\"\"Transitive closure of parents via recursive ascent.\n \"\"\"\n final = []\n for c in chunkList:\n final.append( c )\n final.extend( self.allParentsOf( c.referencedBy, depth+1 ) )\n self.logger.debug( \"References: {0:>{indent}s} {1!s}\".format('--', final, indent=2*depth) )\n return final\n \n\n\n\nclass Action:\n \"\"\"An action performed by pyWeb.\"\"\"\n def __init__( self, name ):\n self.name= name\n self.web= None\n self.options= None\n self.start= None\n self.logger= logging.getLogger( self.__class__.__qualname__ )\n def __str__( self ):\n return \"{!s} [{!s}]\".format( self.name, self.web )\n \n def __call__( self ):\n self.logger.info( \"Starting {!s}\".format(self.name) )\n self.start= time.process_time()\n \n\n \n def duration( self ):\n \"\"\"Return duration of the action.\"\"\"\n return (self.start and time.process_time()-self.start) or 0\n def summary( self ):\n return \"{!s} in {:0.2f} sec.\".format( self.name, self.duration() )\n \n\n\n\n\nclass ActionSequence( Action ):\n \"\"\"An action composed of a sequence of other actions.\"\"\"\n def __init__( self, name, opSequence=None ):\n super().__init__( name )\n if opSequence: self.opSequence= opSequence\n else: self.opSequence= []\n def __str__( self ):\n return \"; \".join( [ str(x) for x in self.opSequence ] )\n \n def __call__( self ):\n for o in self.opSequence:\n o.web= self.web\n o.options= self.options\n o()\n \n\n \n def append( self, anAction ):\n self.opSequence.append( anAction )\n \n\n \n def summary( self ):\n return \", \".join( [ o.summary() for o in self.opSequence ] )\n \n\n\n\n\nclass WeaveAction( Action ):\n \"\"\"Weave the final document.\"\"\"\n def __init__( self ):\n super().__init__( \"Weave\" )\n def __str__( self ):\n return \"{!s} [{!s}, {!s}]\".format( self.name, self.web, self.theWeaver )\n\n \n def __call__( self ):\n super().__call__()\n if not self.options.theWeaver: \n # Examine first few chars of first chunk of web to determine language\n self.options.theWeaver= self.web.language() \n self.logger.info( \"Using {0}\".format(self.options.theWeaver.__class__.__name__) )\n self.options.theWeaver.reference_style= self.options.reference_style\n try:\n self.web.weave( self.options.theWeaver )\n self.logger.info( \"Finished Normally\" )\n except Error as e:\n self.logger.error(\n \"Problems weaving document from {!s} (weave file is faulty).\".format(\n self.web.webFileName) )\n #raise\n \n\n \n def summary( self ):\n if self.options.theWeaver and self.options.theWeaver.linesWritten > 0:\n return \"{!s} {:d} lines in {:0.2f} sec.\".format( self.name, \n self.options.theWeaver.linesWritten, self.duration() )\n return \"did not {!s}\".format( self.name, )\n \n\n\n\n\nclass TangleAction( Action ):\n \"\"\"Tangle source 
files.\"\"\"\n def __init__( self ):\n super().__init__( \"Tangle\" )\n \n def __call__( self ):\n super().__call__()\n self.options.theTangler.include_line_numbers= self.options.tangler_line_numbers\n try:\n self.web.tangle( self.options.theTangler )\n except Error as e:\n self.logger.error( \n \"Problems tangling outputs from {!r} (tangle files are faulty).\".format(\n self.web.webFileName) )\n #raise\n \n\n \n def summary( self ):\n if self.options.theTangler and self.options.theTangler.linesWritten > 0:\n return \"{!s} {:d} lines in {:0.2f} sec.\".format( self.name, \n self.options.theTangler.totalLines, self.duration() )\n return \"did not {!r}\".format( self.name, )\n \n\n\n\n\nclass LoadAction( Action ):\n \"\"\"Load the source web.\"\"\"\n def __init__( self ):\n super().__init__( \"Load\" )\n def __str__( self ):\n return \"Load [{!s}, {!s}]\".format( self.webReader, self.web )\n \n def __call__( self ):\n super().__call__()\n self.webReader= self.options.webReader\n self.webReader.command= self.options.command\n self.webReader.permitList= self.options.permitList \n self.web.webFileName= self.options.webFileName\n error= \"Problems with source file {!r}, no output produced.\".format(\n self.options.webFileName)\n try:\n self.webReader.load( self.web, self.options.webFileName )\n if self.webReader.errors != 0:\n self.logger.error( error )\n raise Error( \"Syntax Errors in the Web\" )\n self.web.createUsedBy()\n if self.webReader.errors != 0:\n self.logger.error( error )\n raise Error( \"Internal Reference Errors in the Web\" ) \n except Error as e:\n self.logger.error(error)\n raise # Older design.\n except IOError as e:\n self.logger.error(error)\n raise\n \n\n \n def summary( self ):\n return \"{!s} {:d} lines from {:d} files in {:0.2f} sec.\".format( \n self.name, self.webReader.totalLines, \n self.webReader.totalFiles, self.duration() )\n \n\n\n\n\n\n\nclass Application:\n def __init__( self ):\n self.logger= logging.getLogger( self.__class__.__qualname__ )\n \n self.defaults= argparse.Namespace(\n verbosity= logging.INFO,\n command= '@',\n weaver= 'rst', \n skip= '', # Don't skip any steps\n permit= '', # Don't tolerate missing includes\n reference= 's', # Simple references\n tangler_line_numbers= False,\n )\n self.expand( self.defaults )\n \n # Primitive Actions\n self.loadOp= LoadAction()\n self.weaveOp= WeaveAction()\n self.tangleOp= TangleAction()\n \n # Composite Actions\n self.doWeave= ActionSequence( \"load and weave\", [self.loadOp, self.weaveOp] )\n self.doTangle= ActionSequence( \"load and tangle\", [self.loadOp, self.tangleOp] )\n self.theAction= ActionSequence( \"load, tangle and weave\", [self.loadOp, self.tangleOp, self.weaveOp] )\n\n \n def parseArgs( self ):\n p = argparse.ArgumentParser()\n p.add_argument( \"-v\", \"--verbose\", dest=\"verbosity\", action=\"store_const\", const=logging.INFO )\n p.add_argument( \"-s\", \"--silent\", dest=\"verbosity\", action=\"store_const\", const=logging.WARN )\n p.add_argument( \"-d\", \"--debug\", dest=\"verbosity\", action=\"store_const\", const=logging.DEBUG )\n p.add_argument( \"-c\", \"--command\", dest=\"command\", action=\"store\" )\n p.add_argument( \"-w\", \"--weaver\", dest=\"weaver\", action=\"store\" )\n p.add_argument( \"-x\", \"--except\", dest=\"skip\", action=\"store\", choices=('w','t') )\n p.add_argument( \"-p\", \"--permit\", dest=\"permit\", action=\"store\" )\n p.add_argument( \"-r\", \"--reference\", dest=\"reference\", action=\"store\", choices=('t', 's') )\n p.add_argument( \"-n\", \"--linenumbers\", 
dest=\"tangler_line_numbers\", action=\"store_true\" )\n p.add_argument( \"files\", nargs='+' )\n config= p.parse_args( namespace=self.defaults )\n self.expand( config )\n return config\n \n def expand( self, config ):\n \"\"\"Translate the argument values from simple text to useful objects.\n Weaver. Tangler. WebReader.\n \"\"\"\n if config.reference == 't':\n config.reference_style = TransitiveReference() \n elif config.reference == 's':\n config.reference_style = SimpleReference()\n else:\n raise Error( \"Improper configuration\" )\n \n try:\n weaver_class= weavers[config.weaver.lower()]\n except KeyError:\n module_name, _, class_name = config.weaver.partition('.')\n weaver_module = __import__(module_name)\n weaver_class = weaver_module.__dict__[class_name]\n if not issubclass(weaver_class, Weaver):\n raise TypeError( \"{0!r} not a subclass of Weaver\".format(weaver_class) )\n config.theWeaver= weaver_class()\n \n config.theTangler= TanglerMake()\n \n if config.permit:\n # save permitted errors, usual case is ``-pi`` to permit ``@i`` include errors\n config.permitList= [ '{!s}{!s}'.format( config.command, c ) for c in config.permit ]\n else:\n config.permitList= []\n \n config.webReader= WebReader()\n \n return config\n \n \n\n \n def process( self, config ):\n root= logging.getLogger()\n root.setLevel( config.verbosity )\n self.logger.debug( \"Setting root log level to {!r}\".format( \n logging.getLevelName(root.getEffectiveLevel()) ) )\n \n if config.command:\n self.logger.debug( \"Command character {!r}\".format(config.command) )\n \n if config.skip:\n if config.skip.lower().startswith('w'): # not weaving == tangling\n self.theAction= self.doTangle\n elif config.skip.lower().startswith('t'): # not tangling == weaving\n self.theAction= self.doWeave\n else:\n raise Exception( \"Unknown -x option {!r}\".format(config.skip) )\n \n self.logger.info( \"Weaver {!s}\".format(config.theWeaver) )\n \n for f in config.files:\n w= Web() # New, empty web to load and process.\n self.logger.info( \"{!s} {!r}\".format(self.theAction.name, f) )\n config.webFileName= f\n self.theAction.web= w\n self.theAction.options= config\n self.theAction()\n self.logger.info( self.theAction.summary() )\n \n\n\n\n# Global list of available weaver classes.\nweavers = {\n 'html': HTML,\n 'htmlshort': HTMLShort,\n 'latex': LaTeX,\n 'rst': RST, \n}\n\n\nclass Logger:\n def __init__( self, dict_config=None, **kw_config ):\n self.dict_config= dict_config\n self.kw_config= kw_config\n def __enter__( self ):\n if self.dict_config:\n logging.config.dictConfig( self.dict_config )\n else:\n logging.basicConfig( **self.kw_config )\n return self\n def __exit__( self, *args ):\n logging.shutdown()\n return False\n\nlog_config= dict(\n version= 1,\n disable_existing_loggers= False, # Allow pre-existing loggers to work.\n handlers= {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'stream': 'ext://sys.stderr',\n 'formatter': 'basic',\n },\n },\n formatters = {\n 'basic': {\n 'format': \"{levelname}:{name}:{message}\",\n 'style': \"{\",\n }\n },\n \n root= { 'handlers': ['console'], 'level': logging.INFO, },\n \n #For specific debugging support...\n loggers= {\n # 'RST': { 'level': logging.DEBUG },\n # 'TanglerMake': { 'level': logging.DEBUG },\n # 'WebReader': { 'level': logging.DEBUG },\n },\n)\n\n\ndef main():\n a= Application()\n config= a.parseArgs()\n a.process(config)\n\nif __name__ == \"__main__\":\n with Logger( log_config ):\n main( )\n\n", "sub_path": "pyweb.py", "file_name": "pyweb.py", "file_ext": "py", 
"file_size_in_byte": 62582, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 79, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 500, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 523, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 530, "usage_type": "call"}, {"api_name": "weakref.ref", "line_number": 548, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 643, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 675, "usage_type": "call"}, {"api_name": "os.path", "line_number": 675, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 696, "usage_type": "call"}, {"api_name": "shlex.split", "line_number": 719, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 775, "usage_type": "call"}, {"api_name": "types.SimpleNamespace", "line_number": 974, "usage_type": "call"}, {"api_name": "builtins.__dict__.items", "line_number": 975, "usage_type": "call"}, {"api_name": "builtins.__dict__", "line_number": 975, "usage_type": "attribute"}, {"api_name": "types.SimpleNamespace", "line_number": 979, "usage_type": "call"}, {"api_name": "os.path", "line_number": 979, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 985, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 1039, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 1040, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1200, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1202, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1215, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1226, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1239, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1250, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1263, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1264, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1282, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1283, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1284, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1285, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1303, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1304, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1332, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1339, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1354, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1356, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1374, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1375, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1387, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1395, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1403, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1410, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1417, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1420, "usage_type": "call"}, 
{"api_name": "string.Template", "line_number": 1433, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1436, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1442, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1443, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1444, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1446, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1447, "usage_type": "call"}, {"api_name": "string.Template", "line_number": 1457, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 1475, "usage_type": "call"}, {"api_name": "tempfile.mkstemp", "line_number": 1521, "usage_type": "call"}, {"api_name": "os.curdir", "line_number": 1521, "usage_type": "attribute"}, {"api_name": "os.fdopen", "line_number": 1522, "usage_type": "call"}, {"api_name": "filecmp.cmp", "line_number": 1530, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 1535, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 1540, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 1543, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 1554, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 1589, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 1595, "usage_type": "call"}, {"api_name": "time.process_time", "line_number": 1601, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 1748, "usage_type": "call"}, {"api_name": "argparse.Namespace", "line_number": 1750, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 1751, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 1773, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 1774, "usage_type": "attribute"}, {"api_name": "logging.WARN", "line_number": 1775, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 1776, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 1825, "usage_type": "call"}, {"api_name": "logging.getLevelName", "line_number": 1828, "usage_type": "call"}, {"api_name": "logging.config.dictConfig", "line_number": 1870, "usage_type": "call"}, {"api_name": "logging.config", "line_number": 1870, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 1872, "usage_type": "call"}, {"api_name": "logging.shutdown", "line_number": 1875, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 1895, "usage_type": "attribute"}]} +{"seq_id": "567533688", "text": "# coding: utf-8\nimport mock\nfrom lxml import html\n\nfrom nose.tools import assert_equal, assert_true, assert_false\nfrom ...helpers import BaseApplicationTest\nfrom app.api_client.error import APIError\n\n\n@mock.patch('app.main.suppliers.DataAPIClient')\nclass TestSuppliersPage(BaseApplicationTest):\n def setup(self):\n super(TestSuppliersPage, self).setup()\n\n self._data_api_client = mock.patch(\n 'app.main.suppliers.DataAPIClient'\n ).start()\n\n self.supplier = self._get_supplier_fixture_data()\n self.supplier_with_minimum_data = self._get_supplier_with_minimum_fixture_data()\n\n def teardown(self):\n self._data_api_client.stop()\n\n def test_supplier_details_page_requires_login(self, api_client):\n api_client.return_value.get_supplier.return_value = self.supplier\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n\n assert res.status_code == 302\n\n def 
test_arbitrary_suppliers_not_allowed(self, api_client):\n self.login_as_supplier(supplier_code=1234)\n api_client.return_value.get_supplier.return_value = self.supplier\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n\n assert res.status_code == 302\n\n def test_suppliers_can_see_own_page(self, api_client):\n self.login_as_supplier(supplier_code=1234)\n api_client.return_value.get_supplier.return_value = self.supplier\n\n res = self.client.get(self.url_for('main.get_supplier', code=1234))\n\n assert res.status_code == 200\n\n def test_should_have_supplier_details_on_supplier_page(self, api_client):\n self.login_as_buyer()\n api_client.return_value.get_supplier.return_value = self.supplier\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n document = html.fromstring(res.get_data(as_text=True))\n\n assert res.status_code == 200\n assert document.xpath('//h1')[0].text.strip() == 'Example PTY LTD'\n\n def test_should_show_supplier_with_minimum_data(self, api_client):\n self.login_as_buyer()\n api_client.return_value.get_supplier.return_value = self.supplier_with_minimum_data\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n document = html.fromstring(res.get_data(as_text=True))\n\n assert res.status_code == 200\n assert document.xpath('//h1')[0].text.strip() == 'Example PTY LTD'\n assert 'None' not in res.get_data(as_text=True)\n\n def test_should_return_404_if_supplier_code_doesnt_exist(self, api_client):\n self.login_as_buyer()\n api_client.return_value.get_supplier.side_effect = APIError(mock.Mock(status_code=404))\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n assert res.status_code == 404\n\n # Check that the test is not silently passing because the URL changed\n api_client.return_value.get_supplier.side_effect = APIError(mock.Mock(status_code=500))\n\n res = self.client.get(self.url_for('main.get_supplier', code=1))\n assert res.status_code == 500\n", "sub_path": "tests/app/views/test_suppliers.py", "file_name": "test_suppliers.py", "file_ext": "py", "file_size_in_byte": 3085, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "helpers.BaseApplicationTest", "line_number": 11, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 15, "usage_type": "call"}, {"api_name": "lxml.html.fromstring", "line_number": 53, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 53, "usage_type": "name"}, {"api_name": "lxml.html.fromstring", "line_number": 63, "usage_type": "call"}, {"api_name": "lxml.html", "line_number": 63, "usage_type": "name"}, {"api_name": "app.api_client.error.APIError", "line_number": 71, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 71, "usage_type": "call"}, {"api_name": "app.api_client.error.APIError", "line_number": 77, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 77, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "549589885", "text": "#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n# Yahoo! 
Finance market data downloader (+fix for Pandas Datareader)\n# https://github.com/ranaroussi/yfinance\n\n\"\"\"\nSanity check for most common library uses all working\n\n- Stock: Microsoft\n- ETF: Russell 2000 Growth\n- Mutual fund: Vanguard 500 Index fund\n- Index: S&P500\n- Currency BTC-USD\n\"\"\"\n\nfrom __future__ import print_function\nimport yfinance as yf\nimport json\nfrom urllib.error import HTTPError\nimport multiprocessing\nfrom joblib import Parallel, delayed\nfrom tqdm import tqdm\n\n\ndef test_yfinance():\n for symbol in ['MSFT', 'IWO', 'VFINX', '^GSPC', 'BTC-USD']:\n print(\">>\", symbol, end=' ... ')\n ticker = yf.Ticker(symbol)\n\n # always should have info and history for valid symbols\n assert(ticker.info is not None and ticker.info != {})\n assert(ticker.history(period=\"max\").empty is False)\n\n # following should always gracefully handled, no crashes\n ticker.cashflow\n ticker.balance_sheet\n ticker.financials\n ticker.sustainability\n ticker.major_holders\n ticker.institutional_holders\n\n print(\"OK\")\n\n\ndef test_ticker_to_json():\n print(\">>\", \"to_json()\", end=' ... ')\n yf.Ticker('BHFAL').to_json()\n yf.Ticker('ACTTW').to_json()\n yf.Ticker('ADP').to_json()\n yf.Ticker('MSFT').to_json()\n yf.Ticker('ALACR').to_json()\n yf.Ticker('ALYA').to_json()\n yf.Ticker('ACAMW').to_json()\n yf.Ticker('ACTT').to_json()\n print(\"OK\")\n\n\ndef test_big_list_per_ticker():\n def run(t):\n try:\n yf.Ticker(t).to_json()\n except HTTPError:\n pass\n\n print(\">>\", \"to_json()\", end=' ... ')\n tickers = open('source_files/nasdaqlisted.txt').read().split()\n tickers += open('source_files/otherlisted.txt').read().split()\n print(\">> Testing\", len(tickers), \"tickers\")\n num_cores = multiprocessing.cpu_count()\n Parallel(n_jobs=num_cores)(delayed(run)(t) for t in tqdm(tickers))\n print(\"OK\")\n\n\ndef test_tickers_to_json():\n print(\">>\", \"to_json()\", end=' ... ')\n tickers = open('source_files/nasdaqlisted.txt').read().split()\n tickers += open('source_files/otherlisted.txt').read().split()\n ticker_json = yf.Tickers(tickers).to_json()\n json.loads(ticker_json)\n ticker_json = yf.Tickers(tickers).to_json()\n json.loads(ticker_json)\n print(\"OK\")\n\n\ndef test_tickers_download():\n print(\">>\", \"download()\", end=' ... 
')\n tickers = open('source_files/nasdaqlisted.txt').read().split()\n tickers += open('source_files/otherlisted.txt').read().split()\n json.loads(yf.Tickers(tickers).download().to_json())\n print(\"OK\")\n\n\nif __name__ == \"__main__\":\n test_yfinance()\n # test_tickers_to_json()\n test_ticker_to_json()\n # test_big_list_per_ticker()\n # test_tickers_download()\n", "sub_path": "runtest.py", "file_name": "runtest.py", "file_ext": "py", "file_size_in_byte": 2810, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "yfinance.Ticker", "line_number": 29, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 48, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 49, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 50, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 51, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 52, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 53, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 54, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 55, "usage_type": "call"}, {"api_name": "yfinance.Ticker", "line_number": 62, "usage_type": "call"}, {"api_name": "urllib.error.HTTPError", "line_number": 63, "usage_type": "name"}, {"api_name": "multiprocessing.cpu_count", "line_number": 70, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 71, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 71, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 71, "usage_type": "call"}, {"api_name": "yfinance.Tickers", "line_number": 79, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 80, "usage_type": "call"}, {"api_name": "yfinance.Tickers", "line_number": 81, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 82, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 90, "usage_type": "call"}, {"api_name": "yfinance.Tickers", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "534266440", "text": "import os\nimport sys\nimport requests\nimport urllib.request\nimport json\nimport re\nimport argparse\nfrom bs4 import BeautifulSoup\nfrom tqdm import tqdm\n\nparser = argparse.ArgumentParser(description=\"SmugMug Downloader\")\nparser.add_argument(\n\t\"-s\", \"--session\", help=\"session ID (required if user is password protected); log in on a web browser and paste the SMSESS cookie\")\nparser.add_argument(\n\t\"-u\", \"--user\", help=\"username (from URL, USERNAME.smugmug.com)\", required=True)\nparser.add_argument(\"-o\", \"--output\", default=\"output/\",\n help=\"output directory\")\nparser.add_argument(\n\t\"--albums\", help='specific album names to download, split by $. Defaults to all. (e.g. 
--albums \"Title 1$Title 2$Title 3\")')\n\nargs = parser.parse_args()\n\nendpoint = \"https://www.smugmug.com\"\n\n# Session ID (required if user is password protected)\n# Log in on a web browser and copy the SMSESS cookie\nSMSESS = args.session\n\ncookies = {\"SMSESS\": SMSESS}\n\nif args.output[-1:] != \"/\" and args.output[-1:] != \"\\\\\":\n\toutput_dir = args.output + \"/\"\nelse:\n\toutput_dir = args.output\n\nif args.albums:\n\tspecificAlbums = [x.strip() for x in args.albums.split('$')]\n\n\n# Gets the JSON output from an API call\ndef get_json(url):\n\tr = requests.get(endpoint + url, cookies=cookies)\n\tsoup = BeautifulSoup(r.text, \"html.parser\")\n\tpres = soup.find_all(\"pre\")\n\treturn json.loads(pres[-1].text)\n\n\n# Retrieve the list of albums\nprint(\"Downloading album list...\", end=\"\")\nalbums = get_json(\"/api/v2/folder/user/%s!albumlist\" % args.user)\nprint(\"done.\")\n\n# Quit if no albums were found\ntry:\n\talbums[\"Response\"][\"AlbumList\"]\nexcept KeyError:\n\tsys.exit(\"No albums were found for the user %s. The user may not exist or may be password protected.\" % args.user)\n\n# Create output directories\nprint(\"Creating output directories...\", end=\"\")\nfor album in albums[\"Response\"][\"AlbumList\"]:\n\tif args.albums:\n\t\tif album[\"Name\"].strip() not in specificAlbums:\n\t\t\tcontinue\n\n\tdirectory = output_dir + album[\"UrlPath\"][1:]\n\tif not os.path.exists(directory):\n\t\tos.makedirs(directory)\nprint(\"done.\")\n\n# Loop through each album\nfor album in tqdm(albums[\"Response\"][\"AlbumList\"]):\n\tif args.albums:\n\t\tif album[\"Name\"].strip() not in specificAlbums:\n\t\t\tcontinue\n\n\talbum_path = output_dir + album[\"UrlPath\"][1:]\n\timages = get_json(album[\"Uri\"] + \"!images\")\n\n\t# Loop through each page\n\twhile True:\n\t\t# Skip if no images are in the album\n\t\tif \"AlbumImage\" not in images[\"Response\"]:\n\t\t\tbreak\n\n\t\t# Loop through each image in the album\n\t\tfor image in tqdm(images[\"Response\"][\"AlbumImage\"]):\n\t\t\timage_path = album_path + \"/\" + \\\n\t\t\t\tre.sub('[^\\w\\-_\\. 
]', '_', image[\"FileName\"])\n\n\t\t\t# Skip if image has already been saved\n\t\t\tif os.path.isfile(image_path):\n\t\t\t\tcontinue\n\n\t\t\timage_req = get_json(image[\"Uris\"][\"LargestImage\"][\"Uri\"])\n\t\t\tdownload_url = image_req[\"Response\"][\"LargestImage\"][\"Url\"]\n\n\t\t\ttry:\n\t\t\t\turllib.request.urlretrieve(download_url, image_path)\n\t\t\texcept UnicodeEncodeError as ex:\n\t\t\t\tprint(\"Unicode Error: \" + str(ex))\n\t\t\t\tcontinue\n\t\t\texcept urllib.error.HTTPError as ex:\n\t\t\t\tprint(\"HTTP Error: \" + str(ex))\n\n\t\t# Loop through each page of the album\n\t\tif \"NextPage\" in images[\"Response\"][\"Pages\"]:\n\t\t\timages = get_json(images[\"Response\"][\"Pages\"][\"NextPage\"])\n\t\telse:\n\t\t\tbreak\n\nprint(\"Completed.\")\n", "sub_path": "smdl.py", "file_name": "smdl.py", "file_ext": "py", "file_size_in_byte": 3258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 42, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 43, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 45, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path", "line_number": 67, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 68, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 72, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 87, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 92, "usage_type": "call"}, {"api_name": "os.path", "line_number": 92, "usage_type": "attribute"}, {"api_name": "urllib.request.request.urlretrieve", "line_number": 99, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 99, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 99, "usage_type": "name"}, {"api_name": "urllib.request.error", "line_number": 103, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "179600496", "text": "from flask import Flask, render_template, session, Blueprint\nfrom flask_cors import CORS\nfrom flask_login import LoginManager, login_user, current_user, logout_user, login_required\n# Import the split-out route modules (apps is the folder name).\nfrom routes import csv\nfrom routes import image\nfrom routes import root\nfrom routes import user\n\nfrom src.database import init_db\nfrom datetime import timedelta, datetime\nfrom src.model import User\nimport src.config\n\ndef create_app():\n\n    app = Flask(__name__)\n    # Enable CORS support\n    CORS(app)\n\n    # Load the DB settings\n    app.config.from_object('src.config.Config')\n    init_db(app)\n\n    # Register the split-out routing (Blueprints)\n    app.register_blueprint(root.web) # /\n    app.register_blueprint(csv.web) # /csv\n    app.register_blueprint(image.web) # /image\n    app.register_blueprint(user.web) # /user\n    return app\n\napp = create_app()\n\ndef get_login_manager(app):\n    login_manager = LoginManager(app)\n    login_manager.login_view = 'user.login'\n    login_manager.refresh_view = 'relogin'\n    login_manager.needs_refresh_message = (u\"Session timedout, please re-login\")\n    login_manager.needs_refresh_message_category = \"info\"\n    return login_manager\nlogin_manager = get_login_manager(app)\n\n@app.errorhandler(404)\ndef 
page_not_found(error):\n return render_template('page_not_found.html'), 404\n\n@app.before_request\ndef before_request():\n session.permanent = True\n app.permanent_session_lifetime = timedelta(minutes=30)\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(int(user_id))", "sub_path": "routes/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1576, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 17, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 19, "usage_type": "call"}, {"api_name": "src.database.init_db", "line_number": 23, "usage_type": "call"}, {"api_name": "routes.root.web", "line_number": 26, "usage_type": "attribute"}, {"api_name": "routes.root", "line_number": 26, "usage_type": "name"}, {"api_name": "routes.csv.web", "line_number": 27, "usage_type": "attribute"}, {"api_name": "routes.csv", "line_number": 27, "usage_type": "name"}, {"api_name": "routes.image.web", "line_number": 28, "usage_type": "attribute"}, {"api_name": "routes.image", "line_number": 28, "usage_type": "name"}, {"api_name": "routes.user.web", "line_number": 29, "usage_type": "attribute"}, {"api_name": "routes.user", "line_number": 29, "usage_type": "name"}, {"api_name": "flask_login.LoginManager", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.session.permanent", "line_number": 49, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 49, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 50, "usage_type": "call"}, {"api_name": "src.model.User.query.get", "line_number": 54, "usage_type": "call"}, {"api_name": "src.model.User.query", "line_number": 54, "usage_type": "attribute"}, {"api_name": "src.model.User", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "418032535", "text": "from django.shortcuts import render\nfrom django.views import View\nfrom django.conf import settings\nfrom accounts.models import Profile\nimport json\nfrom pydoc import locate\n\nfrom django.core import serializers\n\n\n# Create your views here.\n\nclass RegisterDelivery(View):\n def get(self, request):\n try:\n request.user.profile\n except Exception:\n Profile.objects.create(user=request.user)\n if request.user.profile.company is None:\n company_obj = None\n else:\n company_obj = json.loads(serializers.serialize('json', [request.user.profile.company, ]))[0]\n company_obj.update({'id': company_obj['pk']})\n serve_scan_page_only = request.user.groups.filter(name=\"Scan Group\").exists()\n if serve_scan_page_only:\n serve_scan_page_only = 'true'\n else:\n serve_scan_page_only = 'false'\n return render(request, 'app/templates/RegisterDelivery.html',\n context={\n \"serve_scan_page_only\": serve_scan_page_only,\n \"language\": json.dumps(request.user.profile.language),\n \"choices\": json.dumps(settings.LANGUAGES),\n \"company\": json.dumps(company_obj)})\n", "sub_path": "app/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1307, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.views.View", "line_number": 13, "usage_type": "name"}, {"api_name": "accounts.models.Profile.objects.create", "line_number": 18, "usage_type": "call"}, {"api_name": "accounts.models.Profile.objects", "line_number": 18, "usage_type": "attribute"}, {"api_name": "accounts.models.Profile", "line_number": 18, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.serializers.serialize", "line_number": 22, "usage_type": "call"}, {"api_name": "django.core.serializers", "line_number": 22, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 29, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 32, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.settings.LANGUAGES", "line_number": 33, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 33, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "217573900", "text": "import time\n\ndef printing_run_function_info(func):\n def printing(*args, **kwargs):\n t0 = time.time()\n func(*args, **kwargs)\n t1 = time.time()\n print('run function [%s]: %s(s)' % (func.__name__, str(t1-t0)))\n return\n return printing\n\ndef App_Action_win32_Mouse_0():\n import App.Action.win32.Mouse as bot\n b = bot.Mouse(300, 300)\n b.move()\n b.right_click()\n\ndef App_Action_win32_Mouse_1():\n import App.Action.win32.Mouse as bot\n bot.Mouse().right_click()\n\ndef App_Action_win32_Screenshot():\n import App.Action.win32.ScreenShot as ScreenShot\n import numpy as np\n from PIL import Image\n\n #ScreenShot.ScreenShot().save()\n\n # show screenshot in python Image library\n img = ScreenShot.ScreenShot().get_img()\n img.show()\n\n@printing_run_function_info\ndef App_Match_Match_0():\n import App.Match.Match as Match\n import numpy as np\n from PIL import Image\n \n img_big = Image.open('temp.bmp') \n img_small = Image.open('ref.bmp')\n\n img_big = np.asarray(img_big)\n img_small = np.asarray(img_small)\n\n print(Match.Match().find(img_big, img_small))\n print(Match.Match().find(img_big, img_small, mode = 'pos2_random'))\n\ndef App_Bot():\n import App.Bot as bot\n import App.Action.win as action\n a = action.Mouse()\n bot.Bot.event(True, a.move(300,300))\n\nif __name__ == '__main__':\n App_Match_Match_0()", "sub_path": "test_function.py", "file_name": "test_function.py", "file_ext": "py", "file_size_in_byte": 1403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.time", "line_number": 5, "usage_type": "call"}, {"api_name": "time.time", "line_number": 7, "usage_type": "call"}, {"api_name": "App.Action.win32.Mouse.Mouse", "line_number": 14, "usage_type": "call"}, {"api_name": "App.Action.win32.Mouse", "line_number": 14, "usage_type": "name"}, {"api_name": "App.Action.win32.Mouse.Mouse", "line_number": 20, "usage_type": "call"}, {"api_name": "App.Action.win32.Mouse", "line_number": 20, "usage_type": "name"}, {"api_name": "App.Action.win32.ScreenShot.ScreenShot", "line_number": 30, "usage_type": "call"}, {"api_name": "App.Action.win32.ScreenShot", "line_number": 30, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 39, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 39, "usage_type": "name"}, {"api_name": "PIL.Image.open", "line_number": 40, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 40, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 43, "usage_type": "call"}, {"api_name": "App.Match.Match.Match", "line_number": 45, "usage_type": "call"}, {"api_name": "App.Match.Match", "line_number": 45, "usage_type": "name"}, {"api_name": "App.Match.Match.Match", "line_number": 46, "usage_type": "call"}, {"api_name": "App.Match.Match", "line_number": 46, "usage_type": "name"}, {"api_name": "App.Action.win.Mouse", "line_number": 51, "usage_type": "call"}, {"api_name": "App.Action.win", "line_number": 51, "usage_type": "name"}, {"api_name": "App.Bot.Bot.event", "line_number": 52, "usage_type": "call"}, {"api_name": "App.Bot.Bot", "line_number": 52, "usage_type": "attribute"}, {"api_name": "App.Bot", "line_number": 52, "usage_type": "name"}]} +{"seq_id": "252677462", "text": "# -*- coding: utf-8 -*-\r\n\r\n\"\"\"\r\nRGB to GrayScale Conversion\r\n\r\n@author: Koushik Khan [write2koushik.stat@outlook.com]\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport numpy as np\r\nimport argparse\r\nfrom scipy import misc\r\n\r\nbase_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\r\n# print(base_path)\r\nconfig_path = os.path.join(base_path, \"config\")\r\ninput_path = os.path.join(base_path, \"input\")\r\noutput_path = os.path.join(base_path, \"output\")\r\nsys.path.append(base_path)\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('method', help=\"method name used to convert an image, possible choices are 'average', 'lightness' and 'luminosity'\",\r\n choices=['average', 'lightness', 'luminosity'])\r\nargs = parser.parse_args()\r\nmethod = args.method\r\n\r\ndef RGB2GrayScale(input_image, method):\r\n \"\"\"\r\n @input_image: name of the input image\r\n @method: conversion method viz. 'Lightness method', 'Average method' and 'Luminosity method'\r\n \r\n Usage: python RGB2GrayScale.py average\r\n or,\r\n python RGB2GrayScale.py lightness\r\n or,\r\n python RGB2GrayScale.py luminosity\r\n \"\"\"\r\n try:\r\n img = misc.imread(os.path.join(input_path, input_image))\r\n img_name = os.path.basename(input_image) # name of the input image with extension e.g. 'face.png'\r\n except FileNotFoundError:\r\n print(\"Image file not found in the path specified!\")\r\n sys.exit(0)\r\n \r\n dim = img.shape\r\n nrow = dim[0]\r\n ncol = dim[1]\r\n \r\n if method == \"average\":\r\n img_new = (img[:, :, 0] + img[:, :, 1] + img[:, :, 2])/3\r\n \r\n elif method == \"lightness\":\r\n img_new = np.zeros((nrow, ncol)) # zero matrix i.e. 
black image \r\n for i in range(nrow):\r\n for j in range(ncol):\r\n img_new[i, j] = (int(np.min([img[i, j, 0], img[i, j, 1], img[i, j, 2]])) + int(np.max([img[i, j, 0], img[i, j, 1], img[i, j, 2]])))/2\r\n\r\n elif method == \"luminosity\":\r\n # 0.21 R + 0.72 G + 0.07 B\r\n img_new = (0.21 * img[:, :, 0] + 0.72 * img[:, :, 1] + 0.07 * img[:, :, 2])/(0.21 + 0.72 + 0.07)\r\n\r\n else:\r\n #print(\"Conversion method must be any one of 'average', 'lightness' and 'luminosty'\")\r\n #sys.exit(0)\r\n pass\r\n \r\n img_name_comp = img_name.split(\".\")\r\n output_img_name = img_name_comp[0] + \"_\" + method + \".\" + img_name_comp[1]\r\n misc.imsave(os.path.join(output_path, output_img_name), img_new)\r\n return 0\r\n \r\nif __name__ == \"__main__\":\r\n \r\n if not os.path.exists(os.path.join(base_path, \"output\")):\r\n os.makedirs(os.path.join(base_path, \"output\"))\r\n \r\n RGB2GrayScale(\"face.png\", method=method)", "sub_path": "src/RGB2GrayScale.py", "file_name": "RGB2GrayScale.py", "file_ext": "py", "file_size_in_byte": 2690, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 20, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 22, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 40, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 40, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 57, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.misc.imsave", "line_number": 70, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 70, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path", "line_number": 75, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 75, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}]} +{"seq_id": "192333802", "text": "import cv2\nimport numpy as 
np\n\nkernel=np.ones((5,5),int)\n\nimg=cv2.imread(\"Resources/image.jpg\")\nimgGray= cv2.cvtColor(img,cv2.COLOR_BGR2GRAY) #Image to Black and white \nimgBlur= cv2.GaussianBlur(img,(9,9),0) #Blurred Image\nimgCanny=cv2.Canny(img,150,150) #Edge Detection\nimgDialation=cv2.dilate(imgCanny,kernel,iterations=1) #increase canny thickness\nimgEroded=cv2.erode(imgDialation,kernel,iterations=2) #Reduce the thickness of the canny\n\n#cv2.imshow(\"OutputGrayscale\",imgGray)\n#cv2.imshow(\"BlurredImg\",imgBlur)\n#cv2.imshow(\"CannyImg\",imgCanny)\n#cv2.imshow(\"DialationImg\",imgDialation)\ncv2.imshow(\"ErosionImg\",imgEroded)\ncv2.waitKey(0)", "sub_path": "cv2functions.py", "file_name": "cv2functions.py", "file_ext": "py", "file_size_in_byte": 712, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.ones", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 6, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 7, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 7, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.Canny", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 10, "usage_type": "call"}, {"api_name": "cv2.erode", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "383497825", "text": "# IMPORTATIONS STANDARD\nimport logging\nimport random\nimport time\n\n# IMPORTATION THIRD PARTY\nimport pytest\nimport urllib3\n\n# IMPORTATION INTERNAL\nimport degiro_connector.quotecast.helpers.pb_handler as pb_handler\nimport degiro_connector.quotecast.utilities as utilities\nfrom degiro_connector.quotecast.api import API as QuotecastAPI\nfrom degiro_connector.quotecast.models.quotecast_parser import QuotecastParser\nfrom degiro_connector.quotecast.pb.quotecast_pb2 import Chart, Quotecast\n\n# SETUP LOGGING\nlogging.basicConfig(level=logging.FATAL)\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n\n# SETUP FIXTURES\n@pytest.fixture(scope=\"module\")\ndef quotecast_api(user_token) -> QuotecastAPI:\n quotecast_api = QuotecastAPI(user_token=user_token)\n quotecast_api.connect()\n\n return quotecast_api\n\n\n# TESTS FIXTURES\ndef test_fixture_user_token(user_token):\n assert isinstance(user_token, int)\n assert user_token > 0\n\n\ndef test_fixture_quotecast_api(quotecast_api):\n session_id = quotecast_api.connection_storage.session_id\n\n assert isinstance(session_id, str)\n assert len(session_id) == 36\n\n\n# TESTS FEATURES\ndef test_chart(quotecast_api):\n time.sleep(random.uniform(0, 2))\n\n request = Chart.Request()\n request.requestid = \"1\"\n request.resolution = Chart.Interval.PT1M\n request.culture = \"fr-FR\"\n request.series.append(\"issueid:360148977\")\n request.series.append(\"price:issueid:360148977\")\n request.series.append(\"ohlc:issueid:360148977\")\n request.series.append(\"volume:issueid:360148977\")\n request.period = Chart.Interval.P1D\n request.tz = \"Europe/Paris\"\n\n # FETCH DATA\n chart = quotecast_api.get_chart(\n request=request,\n override=None,\n raw=True,\n )\n\n b_series_0_data_keys = [\n \"issueId\",\n \"companyId\",\n \"name\",\n \"identifier\",\n \"isin\",\n \"alfa\",\n \"market\",\n \"currency\",\n \"type\",\n \"quality\",\n \"lastPrice\",\n \"lastTime\",\n 
\"absDiff\",\n \"relDiff\",\n \"highPrice\",\n \"highTime\",\n \"lowPrice\",\n \"lowTime\",\n \"openPrice\",\n \"openTime\",\n \"closePrice\",\n \"closeTime\",\n \"cumulativeVolume\",\n \"previousClosePrice\",\n \"previousCloseTime\",\n \"tradingStartTime\",\n \"tradingEndTime\",\n \"tradingAddedTime\",\n \"lowPriceP1Y\",\n \"highPriceP1Y\",\n \"windowStart\",\n \"windowEnd\",\n \"windowFirst\",\n \"windowLast\",\n \"windowHighTime\",\n \"windowHighPrice\",\n \"windowLowTime\",\n \"windowLowPrice\",\n \"windowOpenTime\",\n \"windowOpenPrice\",\n \"windowPreviousCloseTime\",\n \"windowPreviousClosePrice\",\n \"windowTrend\",\n ]\n\n series_0_data_keys = list(chart[\"series\"][0][\"data\"].keys())\n\n assert b_series_0_data_keys == series_0_data_keys\n assert chart[\"requestid\"] == \"1\"\n assert chart[\"resolution\"] == \"PT1M\"\n assert chart[\"series\"][0][\"data\"][\"quality\"] == \"REALTIME\"\n assert chart[\"series\"][0][\"data\"][\"issueId\"] == 360148977\n\n\ndef test_quotecast(quotecast_api):\n time.sleep(random.uniform(0, 2))\n\n request = Quotecast.Request()\n request.subscriptions[\"AAPL.BATS,E\"].extend(\n [\n \"LastDate\",\n \"LastTime\",\n \"LastPrice\",\n \"LastVolume\",\n \"AskPrice\",\n \"BidPrice\",\n ]\n )\n quotecast_api.subscribe(request=request)\n\n quotecast = quotecast_api.fetch_data()\n\n quotecast_parser = QuotecastParser()\n quotecast_parser.put_quotecast(quotecast=quotecast)\n ticker = quotecast_parser.ticker\n ticker_dict = pb_handler.message_to_dict(message=ticker)\n metrics = ticker_dict[\"products\"][\"AAPL.BATS,E\"][\"metrics\"]\n\n assert \"AAPL.BATS,E\" in ticker.product_list\n\n for metric in metrics:\n assert isinstance(metrics[metric], float)\n\n\ndef test_build_logger():\n time.sleep(random.uniform(0, 2))\n\n logger = utilities.build_logger()\n\n assert isinstance(logger, logging.Logger)\n", "sub_path": "tests/quotecast/test_quotecast.py", "file_name": "test_quotecast.py", "file_ext": "py", "file_size_in_byte": 4051, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.basicConfig", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.FATAL", "line_number": 18, "usage_type": "attribute"}, {"api_name": "urllib3.disable_warnings", "line_number": 19, "usage_type": "call"}, {"api_name": "urllib3.exceptions", "line_number": 19, "usage_type": "attribute"}, {"api_name": "degiro_connector.quotecast.api.API", "line_number": 25, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 23, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.api.API", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 46, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 46, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart.Request", "line_number": 48, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart", "line_number": 48, "usage_type": "name"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart.Interval", "line_number": 50, "usage_type": "attribute"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart", "line_number": 50, "usage_type": "name"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart.Interval", "line_number": 56, "usage_type": "attribute"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Chart", "line_number": 56, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 122, 
"usage_type": "call"}, {"api_name": "random.uniform", "line_number": 122, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Quotecast.Request", "line_number": 124, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.pb.quotecast_pb2.Quotecast", "line_number": 124, "usage_type": "name"}, {"api_name": "degiro_connector.quotecast.models.quotecast_parser.QuotecastParser", "line_number": 139, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.helpers.pb_handler.message_to_dict", "line_number": 142, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.helpers.pb_handler", "line_number": 142, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 152, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 152, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.utilities.build_logger", "line_number": 154, "usage_type": "call"}, {"api_name": "degiro_connector.quotecast.utilities", "line_number": 154, "usage_type": "name"}, {"api_name": "logging.Logger", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "148153714", "text": "'''\nCreated on Apr 8, 2018\n\n@author: SHADI MERHEJ\n'''\n\nimport requests\nimport json\nimport csv\n\n\ndef main():\n global itWorked\n itWorked = False\n site = \"\"\"http://www.nhl.com/stats/rest/goalies?isAggregate=false&reportType=goalie_basic&isGame=true&reportName=goaliesummary&sort=[{%22property%22:%22gameDate%22,%22direction%22:%22DESC%22}]&cayenneExp=gameDate%3E=%222017-10-04%22%20and%20gameDate%3C=%222018-04-08%22%20and%20gameTypeId=2\"\"\"\n \n # Requests the site info\n blob = requests.get(site).content\n\n # Converts that info from a string to readable json\n json_data = json.loads(blob)['data']\n\n # Opens a blank CSV file\n csv_file = csv.writer(open(\"NHLgoaliesScraped.csv\",\"w\"))\n\n # Writes our header row\n csv_file.writerow([\"assists\",\n \"gameDate\", \n \"gameId\",\n \"gamesPlayed\", \n \"gamesStarted\", \n \"goals\",\n \"goalsAgainst\", \n \"goalsAgainstAverage\", \n \"losses\", \n \"opponentTeamAbbrev\", \n \"otLosses\", \n \"penaltyMinutes\", \n \"playerBirthCountry\", \n \"playerBirthDate\", \n \"playerBirthStateProvince\", \n \"playerDraftOverallPickNo\", \n \"playerDraftRoundNo\", \n \"playerDraftYear\", \n \"playerFirstName\", \n \"playerHeight\", \n \"playerId\", \n \"playerInHockeyHof\", \n \"playerIsActive\", \n \"playerLastName\", \n \"playerName\", \n \"playerNationality\", \n \"playerPositionCode\", \n \"playerShootsCatches\", \n \"playerWeight\", \n \"points\", \n \"savePctg\", \n \"saves\",\n \"shotsAgainst\", \n \"shutouts\", \n \"teamAbbrev\", \n \"ties\", \n \"timeOnIce\", \n \"wins\"]) \n\n # Writes each line one by one to the csv file\n for x in json_data:\n try:\n #if x[\"gameId\"] == \"2017020001\":\n #itWorked = True \n csv_file.writerow([x[\"assists\"],\n x[\"gameDate\"], \n x[\"gameId\"],\n x[\"gamesPlayed\"], \n x[\"gamesStarted\"], \n x[\"goals\"],\n x[\"goalsAgainst\"], \n x[\"goalsAgainstAverage\"], \n x[\"losses\"], \n x[\"opponentTeamAbbrev\"], \n x[\"otLosses\"], \n x[\"penaltyMinutes\"], \n x[\"playerBirthCountry\"], \n x[\"playerBirthDate\"], \n x[\"playerBirthStateProvince\"], \n x[\"playerDraftOverallPickNo\"], \n x[\"playerDraftRoundNo\"], \n x[\"playerDraftYear\"], \n x[\"playerFirstName\"], \n x[\"playerHeight\"], \n x[\"playerId\"], \n x[\"playerInHockeyHof\"], \n x[\"playerIsActive\"], \n x[\"playerLastName\"], \n x[\"playerName\"], \n x[\"playerNationality\"], \n x[\"playerPositionCode\"], \n 
x[\"playerShootsCatches\"], \n x[\"playerWeight\"], \n x[\"points\"], \n x[\"savePctg\"], \n x[\"saves\"],\n x[\"shotsAgainst\"], \n x[\"shutouts\"], \n x[\"teamAbbrev\"], \n x[\"ties\"], \n x[\"timeOnIce\"], \n x[\"wins\"]]) \n print(x[\"gameId\"])\n #print(type(x[\"gameId\"]))\n if x[\"gameId\"]==2017020001:\n print(\"Yes\")\n else:\n print(\"Nope\")\n \n except UnicodeEncodeError:\n print(x[\"playerName\"], x[\"playerLastName\"], x[\"playerBirthCity\"] )\n continue\nmain()\n", "sub_path": "NHLscraper1.py", "file_name": "NHLscraper1.py", "file_ext": "py", "file_size_in_byte": 3803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 21, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "468091556", "text": "from kafka import KafkaConsumer, KafkaProducer\nfrom json import loads, dumps\nimport pandas as pd\nimport pickle\n\nconsumer = KafkaConsumer('aqi-weather-joined', auto_offset_reset='earliest',\n bootstrap_servers=['10.0.1.171:9092','10.0.0.154:9092','10.0.2.72:9092'],\n value_deserializer=lambda x: loads(x.decode('utf-8')))\n\nproducer = KafkaProducer(bootstrap_servers=['10.0.1.171:9092','10.0.0.154:9092','10.0.2.72:9092'],\n key_serializer=lambda x: bytes(str(x), encoding='utf-8'),\n value_serializer=lambda x: dumps(x).encode('utf-8') )\n\nfilename = './aqi_prediction_model.pickle'\nwith open(filename, 'rb') as file:\n model = pickle.load(file)\n\nfor message in consumer:\n data=[]\n values = message.value\n data.append(values)\n df = pd.DataFrame(data)\n predicted = model.predict(df[['temp', 'pressure', 'humidity', 'windSpeed', 'windDeg']])\n data = {\"City\": df.city[0], \"Predicted\": predicted[0], \"True\": df.aqi[0]}\n producer.send('aqi-predict', value=data, key=message.key)\n", "sub_path": "python_model/predict_aqi.py", "file_name": "predict_aqi.py", "file_ext": "py", "file_size_in_byte": 1094, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "kafka.KafkaConsumer", "line_number": 6, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 8, "usage_type": "call"}, {"api_name": "kafka.KafkaProducer", "line_number": 10, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 12, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 16, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "230528649", "text": "import home_menu, pygame\n\nW = 400\nH = 400\nFPS = 40 # frames per second\n\nclass Game_Window(object):\n\tdef __init__(self):\n\t\tpygame.init()\n\t\tself.screen = pygame.display.set_mode((W,H),0,32)\n\t\tself.clock = pygame.time.Clock()\n\t\tself.index = 0\n\t\tself.init_states()\n\n\tdef run(self):\n\t\tself.events = pygame.event.get()\n\t\tfor event in self.events:\n\t\t\tif event.type == pygame.QUIT:\n\t\t\t\texit()\n\n\t\t# self.screen.fill( (0,0,0) )\n\t\tsurface = pygame.display.get_surface() # do I have to do this every tick? 
unsure\n\t\tdt = self.clock.tick(FPS) # dt, in ms\n\t\t\n\t\t# draw current state\n\t\tstate = self.states[self.index]\n\t\tcurrent_state_surface = state.run(self.events,dt)\n\t\tsurface.blit(current_state_surface,(0,0))\n\n\t\t# update display\n\t\tpygame.display.update()\n\n\tdef init_states(self):\n\t\tself.states = [None,None,None,None]\n\t\tself.states[0] = home_menu.Home_Menu()\n\nif __name__ == '__main__':\n\tapp = Game_Window()\n\twhile True: app.run()", "sub_path": "init.py", "file_name": "init.py", "file_ext": "py", "file_size_in_byte": 918, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 22, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 31, "usage_type": "attribute"}, {"api_name": "home_menu.Home_Menu", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "127713194", "text": "import redis\nfrom config import redisPool\n\n\n'''\n默认6379端口,第0个数据库\n'''\n\n\n\nr = redis.Redis(connection_pool=redisPool)\n\n# r.flushdb()\nif not r.exists(\"SubScan\"):\n file1 = open(r\"dict/SUB_scan.txt\", \"r\", encoding='utf-8')\n for line1 in file1.readlines():\n r.lpush(\"SubScan\", line1.replace(\"\\n\", ''))\n file1.close()\nif not r.exists(\"SenScan\"):\n file2 = open(r\"dict/SEN_scan.txt\", \"r\", encoding='utf-8')\n for line2 in file2.readlines():\n r.lpush(\"SenScan\", line2.replace(\"\\n\", \"\"))\n file2.close()\nif not r.exists(\"XSSpayloads\"):\n file3=open('XSSBug/normal_payload.txt', 'r')\n for line3 in file3.readlines():\n r.lpush(\"XSSpayloads\",line3.replace(\"\\n\",\"\"))\n file3.close()\nif not r.exists(\"bugtype\"):\n file4=open('dict/bugtype.txt', 'r',encoding='utf-8')\n for line4 in file4.readlines():\n line4=line4.strip('\\n')\n name=line4.split(\":\")[0]\n grade=line4.split(\":\")[1]\n r.hset('bugtype',name,grade)\n file4.close()\nif not r.exists(\"useragents\"):\n file5 = open('dict/useragents.txt', 'r', encoding='utf-8')\n for line5 in file5.readlines():\n line5=line5.strip('\\n')\n r.lpush('useragents',line5)\n file5.close()\n\n\n\n", "sub_path": "ImportToRedis.py", "file_name": "ImportToRedis.py", "file_ext": "py", "file_size_in_byte": 1224, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "redis.Redis", "line_number": 11, "usage_type": "call"}, {"api_name": "config.redisPool", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "592890741", "text": "# -*- coding: utf-8 -*-\nfrom dateutil import rrule\nimport pkg_resources\n\nfrom five import grok\n\nfrom zope import schema\nfrom zope.interface import Interface, invariant, Invalid, Attribute\nfrom zope.component import getAllUtilitiesRegisteredFor as 
getallutils\nfrom zope.schema.interfaces import IContextSourceBinder\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\n\nfrom Products.CMFDefault.utils import checkEmailAddress\nfrom Products.CMFDefault.exceptions import EmailAddressInvalid\n\nfrom plone.directives import form\nfrom plone.dexterity.interfaces import IDexterityFTI\nfrom plone.dexterity.utils import schemaNameToPortalType as getname\n\nfrom z3c.form.browser.checkbox import CheckBoxFieldWidget\nfrom z3c.form.browser.radio import RadioFieldWidget\nfrom z3c.form import widget\nfrom seantis.reservation import _, utils\nfrom seantis.reservation.raster import VALID_RASTER_VALUES\n\nfrom seantis.reservation.mail_templates import templates\n\nfrom seantis.reservation.utils import _languagelist\nfrom zope.interface.declarations import alsoProvides\nfrom zope.component.hooks import getSite\nfrom zope.i18n import translate\nfrom zope.schema.interfaces import ITime\nfrom zope.interface.declarations import classImplements\nfrom plone.app.viewletmanager.manager import BaseOrderedViewletManager\n\ntry:\n pkg_resources.get_distribution('plone.multilingual')\n from plone.multilingualbehavior import directives\n\nexcept pkg_resources.DistributionNotFound:\n\n class _NullDirectives(object):\n \"\"\"Null interface use when no multilingual support is available.\"\"\"\n def languageindependent(self, ignored):\n pass\n\n directives = _NullDirectives()\n\ndays = SimpleVocabulary(\n [\n SimpleTerm(value=rrule.MO, title=_(u'Mo')),\n SimpleTerm(value=rrule.TU, title=_(u'Tu')),\n SimpleTerm(value=rrule.WE, title=_(u'We')),\n SimpleTerm(value=rrule.TH, title=_(u'Th')),\n SimpleTerm(value=rrule.FR, title=_(u'Fr')),\n SimpleTerm(value=rrule.SA, title=_(u'Sa')),\n SimpleTerm(value=rrule.SU, title=_(u'Su')),\n ]\n)\n\nrecurrence = SimpleVocabulary(\n [\n SimpleTerm(value=False, title=_(u'Once')),\n SimpleTerm(value=True, title=_(u'Daily')),\n ]\n)\n\ncalendar_views = SimpleVocabulary(\n [\n SimpleTerm(value='month', title=_(u'Monthly View')),\n SimpleTerm(value='agendaWeek', title=_(u'Weekly View')),\n SimpleTerm(value='agendaDay', title=_(u'Daily View'))\n ]\n)\n\ndefault_views = ['month', 'agendaWeek', 'agendaDay']\ndefault_selected_view = 'agendaWeek'\n\ncalendar_dates = SimpleVocabulary(\n [\n SimpleTerm(value='current', title=_(u'Always show the current date')),\n SimpleTerm(value='specific', title=_(u'Always show a specific date'))\n ]\n)\n\n\ndef select_at_least_one(values):\n if not values:\n raise Invalid(\n _(u'Select at least one value')\n )\n return True\n\n\n@grok.provider(IContextSourceBinder)\ndef form_interfaces(context):\n \"\"\" Used as a source for a vocabulary this function returns a vocabulary\n of interfaces which may be used as sub-forms in a resource object.\n\n \"\"\"\n behaviors = set((\n 'seantis.reservation.interfaces.IReservationFormSet',\n 'seantis.reservation.interfaces.IReservationManagerFormSet'\n ))\n ftis = [\n fti for fti in getallutils(IDexterityFTI) if behaviors & set(fti.behaviors)\n ]\n site = getSite()\n\n def get_term(item):\n title = translate(item.Title(), context=site.REQUEST)\n return SimpleTerm(title=title, value=item.id)\n\n return SimpleVocabulary(map(get_term, ftis))\n\n\n@grok.provider(IContextSourceBinder)\ndef plone_languages(context):\n def get_term(item):\n return SimpleTerm(title=item[1]['native'], value=item[0])\n\n terms = sorted(map(get_term, _languagelist.items()), key=lambda t: t.title)\n\n return SimpleVocabulary(terms)\n\n\n# TODO -> Move this to a separate module as it is also 
used in seantis.dir.base\ndef validate_email(value):\n try:\n if value:\n checkEmailAddress(value)\n except EmailAddressInvalid:\n raise Invalid(_(u'Invalid email address'))\n return True\n\n\nclass EmailField(schema.TextLine):\n\n def __init__(self, *args, **kwargs):\n super(EmailField, self).__init__(*args, **kwargs)\n\n def _validate(self, value):\n super(EmailField, self)._validate(value)\n validate_email(value)\n\n# referenced by configuration.zcml to register the Email fields\nfrom plone.schemaeditor.fields import FieldFactory\nEmailFieldFactory = FieldFactory(EmailField, _(u'Email'))\n\nfrom plone.supermodel.exportimport import BaseHandler\nEmailFieldHandler = BaseHandler(EmailField)\n\n\nclass IOverview(Interface):\n \"\"\" Views implementing this interface may use the OverviewletManager to\n display an overview of a list of resources.\n\n The OverviewletManager displays viewlets which work with a list of\n resources in a folderish view. Those resources are not required to actually\n be stored in that folder. They just need to be defined using this\n interface.\n\n The result may be something like this, with the part on the right side\n being the work of the overview:\n\n Resources Overview\n\n ╔═══════════════╗\n » Resource One ║ ░ ░ ░ ░ ░ ░ ░ ║\n » Resource Two ║ calendar ║\n ║ ░ ░ ░ ░ ░ ░ ░ ║\n ╚═══════════════╝\n\n » Monthly Report\n » Compare Resources\n \"\"\"\n\n def resource_map(self):\n \"\"\" Returns a dictionary mapping items to resources. Or in the case\n of a simple resource list, the list of uuids of those resources.\n\n Simple\n ------\n If you deal with a folderish view which only displays a list of\n resources you can simply return a list of uuids (or any other\n iterable except strings):\n\n [uuid1, uuid2, uuid3]\n\n The uuids may be UUID objects or strings.\n\n -> See Listing view in seantis.reservation.resource\n\n Grouped\n -------\n If you deal with a folderish view which shows parents of groups of\n resources you should return something like this:\n\n group1 -> [uuid1, uuid2]\n group2 -> [uuid3]\n\n This will highlight group1 if a day containing uuid1 or uuid2 is\n hovered over in the calendar. It will also highlight the days for which\n there's an allocation in any of the resources when a group is hovered\n over.\n\n -> See Directory View in seantis.dir.facility.directory\n\n Note\n ----\n For the highlighting to work the id of the element in the browser\n must be the uuid in the simple and the group key in the grouped\n example.\n\n \"\"\"\n\n\n# not really where I want this code to be, but the code needs some reorganizing\n# because of circular imports and the like\nclass OverviewletManager(BaseOrderedViewletManager, grok.ViewletManager):\n \"\"\" Manages the viewlets shown in the overview. \"\"\"\n grok.context(Interface)\n grok.name('seantis.reservation.overviewletmanager')\n\n @utils.cached_property\n def uuidmap(self):\n \"\"\" Returns a dictionary mapping resource uuids to item ids. \"\"\"\n\n if not IOverview.providedBy(self.view):\n return {}\n\n rmap = self.view.resource_map()\n assert not isinstance(rmap, basestring)\n\n def transform_uuid(target):\n if utils.is_uuid(target):\n return utils.string_uuid(target)\n else:\n return target\n\n if not isinstance(rmap, dict):\n return dict(([transform_uuid(uuid)] * 2) for uuid in rmap)\n\n # overlay.js needs the map the other way around, which is mostly a\n # historical artifact, but it also makes more sense for the developer\n # using IOverview to define a group -> resources relationship\n uuidmap = {}\n for key, resources in rmap.items():\n assert isinstance(resources, (list, tuple, set))\n\n for resource in resources:\n uuidmap.setdefault(transform_uuid(resource), []).append(key)\n\n return uuidmap\n\n @property\n def resource_uuids(self):\n if not self.uuidmap:\n return []\n\n return self.uuidmap.keys()\n\n\nclass IReservationFormSet(Interface):\n \"\"\" Marks interface as usable for sub-forms in a resource object. \"\"\"\n\n\nclass IReservationManagerFormSet(IReservationFormSet):\n \"\"\" Same as IReservationFormSet but only available to managers. \"\"\"\n\n\nclass IResourceAllocationDefaults(form.Schema):\n\n directives.languageindependent('quota')\n quota = schema.Int(\n title=_(u'Quota'),\n description=_(\n u'Number of times an allocation may be reserved at the same time. '\n u'e.g. 3 spots in a daycare center, 2 available cars, '\n u'1 meeting room. '\n ),\n default=1\n )\n\n directives.languageindependent('reservation_quota_limit')\n reservation_quota_limit = schema.Int(\n title=_(u'Reservation Quota Limit'),\n description=_(\n u'The maximum quota a single reservation may occupy at once. '\n u'There is no limit if set to zero.'\n ),\n default=1\n )\n\n directives.languageindependent('approve_manually')\n approve_manually = schema.Bool(\n title=_(u'Manually approve reservation requests'),\n description=_(\n u'If checked a reservation manager must decide if a reservation '\n u'can be approved. Until then users are added to a waitinglist. '\n u'Reservations are automatically approved if this is not checked. '\n ),\n default=False\n )\n\n directives.languageindependent('partly_available')\n partly_available = schema.Bool(\n title=_(u'Partly available'),\n description=_(\n u'If the allocation is partly available users may reserve '\n u'only a part of it (e.g. half of it). If not the allocation '\n u'must be reserved as a whole or not at all'\n ),\n default=False\n )\n\n directives.languageindependent('raster')\n raster = schema.Choice(\n title=_(u'Raster'),\n description=_(\n u'Defines the minimum length of any given reservation as well '\n u'as the alignment of the start / end of the allocation. E.g. a '\n u'raster of 30 minutes means that the allocation can only start '\n u'at xx:00 and xx:30 respectively'\n ),\n values=VALID_RASTER_VALUES,\n default=15\n )\n\n @invariant\n def isValidQuota(Allocation):\n if not (1 <= Allocation.quota and Allocation.quota <= 1000):\n raise Invalid(_(u'Quota must be between 1 and 1000'))\n\n @invariant\n def isValidQuotaLimit(Allocation):\n if Allocation.reservation_quota_limit < 0:\n raise Invalid(\n _(u'Reservation quota limit must be zero or a positive number')\n )\n\n ######### deprecated #########\n approve = schema.Bool(\n title=_(u'DEPRECATED: Approve reservation requests'), default=True\n )\n # approve has been moved to approve_manually and will be removed in\n # a future release. 
approve_manually is equivalent.\n\n\nclass IResourceBase(IResourceAllocationDefaults):\n \"\"\" A resource displaying a calendar. \"\"\"\n\n title = schema.TextLine(\n title=_(u'Name')\n )\n\n description = schema.Text(\n title=_(u'Description'),\n required=False\n )\n\n directives.languageindependent('first_hour')\n first_hour = schema.Int(\n title=_(u'First hour of the day'),\n description=_(\n u'Everything before this hour is not shown in the '\n u'calendar, making the calendar display more compact. '\n u'Should be set to an hour before which there cannot '\n u'be any reservations.'\n ),\n default=7\n )\n\n directives.languageindependent('last_hour')\n last_hour = schema.Int(\n title=_(u'Last hour of the day'),\n description=_(\n u'Everything after this hour is not shown in the '\n u'calendar, making the calendar display more compact. '\n u'Should be set to an hour after which there cannot '\n u'be any reservations.'\n ),\n default=23\n )\n\n directives.languageindependent('available_views')\n available_views = schema.List(\n title=_(u'Available Views'),\n description=_(u'Views available to the user on the calendar.'),\n value_type=schema.Choice(\n source=calendar_views\n ),\n default=default_views,\n constraint=select_at_least_one\n )\n\n form.widget(available_views=CheckBoxFieldWidget)\n\n directives.languageindependent('selected_view')\n selected_view = schema.Choice(\n title=_(u'Selected View'),\n description=_(u'Selected view when opening the calendar.'),\n source=calendar_views,\n default=default_selected_view\n )\n\n form.widget(selected_view=RadioFieldWidget)\n\n directives.languageindependent('selected_date')\n selected_date = schema.Choice(\n title=_(u'Selected Date'),\n description=_(u'Calendar date shown when opening the calendar.'),\n source=calendar_dates,\n default='current'\n )\n\n form.widget(selected_date=RadioFieldWidget)\n\n directives.languageindependent('specific_date')\n specific_date = schema.Date(\n title=_(u'Specific Date'),\n required=False\n )\n\n form.fieldset(\n 'defaults',\n label=_(u'Default Allocation Values'),\n fields=(\n 'quota', 'partly_available', 'raster', 'approve_manually',\n 'reservation_quota_limit'\n )\n )\n\n directives.languageindependent('formsets')\n formsets = schema.List(\n title=_(u'Formsets'),\n description=_(\n u'Subforms that need to be filled out to make a reservation. 
'\n u'Forms can currently only be created by a site-administrator.'\n ),\n value_type=schema.Choice(\n source=form_interfaces,\n ),\n required=False\n )\n\n form.widget(formsets=CheckBoxFieldWidget)\n\n @invariant\n def isValidFirstLastHour(Resource):\n in_valid_range = lambda h: 0 <= h and h <= 24\n first_hour, last_hour = Resource.first_hour, Resource.last_hour\n\n if not in_valid_range(first_hour):\n raise Invalid(_(u'Invalid first hour'))\n\n if not in_valid_range(last_hour):\n raise Invalid(_(u'Invalid last hour'))\n\n if last_hour <= first_hour:\n raise Invalid(\n _(u'First hour must be smaller than last hour')\n )\n\n @invariant\n def isValidCalendarDate(Resource):\n if Resource.selected_date == 'specific' and not Resource.specific_date:\n raise Invalid(\n _(u\"You chose to 'Always show a specific date' but you did \"\n u\"not specify a specific date\")\n )\n\n @invariant\n def isValidSelectedView(Resource):\n if Resource.selected_view not in Resource.available_views:\n raise Invalid(\n _(u'The selected view must be one of the available views.')\n )\n\n\nclass IResource(IResourceBase):\n\n def uuid():\n \"\"\"Return the resource's UUID to be used as database foreign key.\n\n For multilingual content this could be UUID of a canonical object.\n\n \"\"\"\n\n\nclass IAllocationTime(ITime):\n \"\"\"Needed for validation.\"\"\"\n\n\nclass AllocationTime(schema.Time):\n \"\"\"An allocation time.\"\"\"\n\n\nclassImplements(AllocationTime, IAllocationTime)\n\n\nclass IAllocation(IResourceAllocationDefaults):\n \"\"\" A reservable time-slot within a calendar. \"\"\"\n\n id = schema.Int(\n title=_(u'Id'),\n default=-1,\n required=False,\n )\n\n group = schema.Text(\n title=_(u'Recurrence'),\n default=u'',\n required=False\n )\n\n timeframes = schema.Text(\n title=_(u'Timeframes'),\n default=u'',\n required=False\n )\n\n start_time = AllocationTime(\n title=_(u'Start'),\n description=_(\n u'Allocations may start every 5 minutes if the allocation '\n u'is not partly available. If it is partly available the start '\n u'time may be every x minute where x equals the given raster.'\n )\n )\n\n end_time = AllocationTime(\n title=_(u'End'),\n description=_(\n u'Allocations may end every 5 minutes if the allocation '\n u'is not partly available. If it is partly available the start '\n u'time may be every x minute where x equals the given raster. '\n u'The minimum length of an allocation is also either 5 minutes '\n u'or whatever the value of the raster is.'\n )\n )\n\n whole_day = schema.Bool(\n title=_(u'Whole Day'),\n description=_(\n u'The allocation spans the whole day.'\n ),\n required=False,\n default=False\n )\n\n recurrence = schema.Text(\n title=_(u'Recurrence'),\n required=False,\n )\n\n day = schema.Date(\n title=_(u'Day'),\n )\n\n days = schema.List(\n title=_(u'Days'),\n value_type=schema.Choice(vocabulary=days),\n required=False\n )\n\n separately = schema.Bool(\n title=_(u'Separately reservable'),\n description=_(\n u'If checked parts of the recurrence may be reserved. '\n u'If not checked the recurrence must be reserved as a whole.'\n ),\n required=False,\n default=False\n )\n\n @invariant\n def isValidRange(Allocation):\n if Allocation.whole_day:\n return\n\n start, end = utils.get_date_range(\n Allocation.day,\n Allocation.start_time, Allocation.end_time\n )\n\n if abs((end - start).seconds // 60) < 5:\n raise Invalid(_(u'The allocation must be at least 5 minutes long'))\n\n @invariant\n def isValidOption(Allocation):\n if Allocation.recurrence:\n if Allocation.partly_available and not Allocation.separately:\n raise Invalid(_(\n u'Partly available allocations can only be reserved '\n u'separately'\n ))\n\n\nclass ITimeframe(form.Schema):\n \"\"\" A timespan which is either visible or hidden. \"\"\"\n\n title = schema.TextLine(\n title=_(u'Name')\n )\n\n start = schema.Date(\n title=_(u'Start')\n )\n\n end = schema.Date(\n title=_(u'End')\n )\n\n @invariant\n def isValidDateRange(Timeframe):\n if Timeframe.start > Timeframe.end:\n raise Invalid(_(u'End date before start date'))\n\ntemplate_variables = _(\n u'May contain the following template variables:<br />'\n u'%(resource)s - title of the resource<br />'\n u'%(dates)s - list of dates reserved<br />'\n u'%(reservation_mail)s - email of reservee<br />'\n u'%(data)s - formdata associated with the reservation<br />'\n u'%(approval_link)s - link to the approval view<br />'\n u'%(denial_link)s - link to the denial view<br />'\n u'%(cancel_link)s - link to the cancel view'\n)\n\ntemplate_revoke_variables = template_variables + _(\n u'%(reason)s - reason for revocation<br />'\n)\n\nreservations_template_variables = _(\n u'May contain the following template variable:<br />'\n u'%(reservations)s - list of reservations<br />
'\n u'%(quota)s - amount of reservations'\n)\n\n\nclass IEmailTemplate(form.Schema):\n \"\"\" An email template used for custom email messages \"\"\"\n\n language = schema.Choice(\n title=_(u'Language'),\n source=plone_languages\n )\n\n reservation_made_subject = schema.TextLine(\n title=_(u'Email Subject for Reservation Autoapproved'),\n description=_(u'Sent to managers when a reservation is '\n u'automatically approved. '\n u'May contain the template variables listed below.'),\n default=templates['reservation_made'].get_subject('en')\n )\n\n reservation_made_content = schema.Text(\n title=_(u'Email Text for Reservation Autoapproved'),\n description=template_variables,\n default=templates['reservation_made'].get_body('en')\n )\n\n reservation_pending_subject = schema.TextLine(\n title=_(u'Email Subject for Reservation Pending'),\n description=_(\n u'Sent to managers when a new pending reservation is made. '\n u'May contain the template variables listed below.'\n ),\n default=templates['reservation_pending'].get_subject('en')\n )\n\n reservation_pending_content = schema.Text(\n title=_(u'Email Text for Reservation Pending'),\n description=template_variables,\n default=templates['reservation_pending'].get_body('en')\n )\n\n reservation_received_subject = schema.TextLine(\n title=_(u'Email Subject for Received Reservations'),\n description=_(\n u'Sent to users when a new pending reservation is made. '\n u'May contain the template variables listed below.'\n ),\n default=templates['reservation_received'].get_subject('en')\n )\n\n reservation_received_content = schema.Text(\n title=_(u'Email Text for Received Reservations'),\n description=reservations_template_variables,\n default=templates['reservation_received'].get_body('en')\n )\n\n reservation_approved_subject = schema.TextLine(\n title=_(u'Email Subject for Approved Reservations'),\n description=_(u'Sent to users when a reservation is approved. '\n u'May contain the template variables listed below.'),\n default=templates['reservation_approved'].get_subject('en')\n )\n\n reservation_approved_content = schema.Text(\n title=_(u'Email Text for Approved Reservations'),\n description=template_variables,\n default=templates['reservation_approved'].get_body('en')\n )\n\n reservation_denied_subject = schema.TextLine(\n title=_(u'Email Subject for Denied Reservations'),\n description=_(u'Sent to users when a reservation is denied. '\n u'May contain the template variables listed below.'),\n default=templates['reservation_denied'].get_subject('en')\n )\n\n reservation_denied_content = schema.Text(\n title=_(u'Email Text for Denied Reservations'),\n description=template_variables,\n default=templates['reservation_denied'].get_body('en')\n )\n\n reservation_revoked_subject = schema.TextLine(\n title=_(u'Email Subject for Revoked Reservations'),\n description=_(u'Sent to users when a reservation is revoked. '\n u'May contain the template variables listed below.'),\n default=templates['reservation_revoked'].get_subject('en')\n )\n\n reservation_revoked_content = schema.Text(\n title=_(u'Email Text for Revoked Reservations'),\n description=template_revoke_variables,\n default=templates['reservation_revoked'].get_body('en')\n )\n\n\ndef get_default_language(adapter):\n return utils.get_current_site_language()\n\nDefaultLanguage = widget.ComputedWidgetAttribute(\n get_default_language, field=IEmailTemplate['language']\n)\n\n\nclass IReservation(Interface):\n \"\"\" A reservation of an allocation (may be pending or approved). 
\"\"\"\n\n id = schema.Int(\n title=_(u'Id'),\n default=-1,\n required=False\n )\n\n day = schema.Date(\n title=_(u'Day'),\n required=False\n )\n\n start_time = schema.Time(\n title=_(u'Start'),\n required=False\n )\n\n end_time = schema.Time(\n title=_(u'End'),\n required=False\n )\n\n quota = schema.Int(\n title=_(u'Reservation Quota'),\n required=False,\n default=1\n )\n\n email = EmailField(\n title=_(u'Email'),\n required=True\n )\n\n description = schema.TextLine(\n title=_(u'Description'),\n description=_('Visible on the calendar'),\n required=False,\n )\n\n recurrence = schema.Text(\n title=_(u'Recurrence'),\n required=False,\n )\n\n\nclass IReservationIdForm(Interface):\n \"\"\" Describes a form with a hidden reservation-id field. Use with\n seantis.reservation.reserve.ReservationIdForm. \"\"\"\n\n reservation = schema.Text(\n title=_(u'Reservation'),\n required=False\n )\n\n\nclass IAllocationIdForm(IReservationIdForm):\n \"\"\" Describes a form with a hidden reservation-id and allocation-id field.\n Use with seantis.reservation.reserve.ReservationRemovalForm. \"\"\"\n\n allocation_id = schema.Int(title=_(\"Allocation Id\"),\n required=False)\n\n\nclass IGroupReservation(Interface):\n \"\"\" A reservation of an allocation group. \"\"\"\n\n group = schema.Text(\n title=_(u'Recurrence'),\n required=False\n )\n\n quota = schema.Int(\n title=_(u'Reservation Quota'),\n required=False,\n default=1\n )\n\n email = EmailField(\n title=_(u'Email'),\n required=True\n )\n\n\nclass IRevokeReservation(IReservationIdForm):\n \"\"\" For the reservation revocation form. \"\"\"\n\n send_email = schema.Bool(\n title=_(u\"Send Email\"),\n description=_(\n u\"Send an email to the reservee informing him of the revocation\"\n ),\n default=True\n )\n\n reason = schema.Text(\n title=_(u'Reason'),\n description=_(\n u\"Optional reason for the revocation. Sent to the reservee. \"\n u\"e.g. 'Your reservation has to be cancelled because the lecturer \"\n u\"is ill'.\"\n ),\n required=False\n )\n\n\nclass IResourceViewedEvent(Interface):\n \"\"\" Event triggered when a seantis.reservation resource is viewed. Pretty\n useful if you need a hook which is guaranteed to be triggered on a plone\n site where seantis.reservation is active.\n\n \"\"\"\n\n context = Attribute(\"The IResourceBase context object\")\n\n\nclass IReservationBaseEvent(Interface):\n \"\"\" Base Interface for reservation events (not actually fired). \"\"\"\n\n reservation = Attribute(\"The reservation record associated with the event\")\n language = Attribute(\"The language of the site or current request\")\n\n\nclass IReservationMadeEvent(IReservationBaseEvent):\n \"\"\" Event triggered when a reservation is made (autoapproved or\n added to the pending reservation list).\n\n \"\"\"\n\n\nclass IReservationApprovedEvent(IReservationBaseEvent):\n \"\"\" Event triggered when a reservation is approved. \"\"\"\n\n\nclass IReservationDeniedEvent(IReservationBaseEvent):\n \"\"\" Event triggered when a reservation is denied. \"\"\"\n\n\nclass IReservationRevokedEvent(IReservationBaseEvent):\n \"\"\" Event triggered when a reservation is revoked. \"\"\"\n\n reason = Attribute(\"\"\"\n Optional reason for the revocation given by manager. The reason is\n given in the language of the one writing it as the language of the\n reservee is unknown at this point. 
In the future we might have to\n store said language on the reservation.\n \"\"\")\n\n\nclass IReservationsConfirmedEvent(Interface):\n \"\"\" Event triggered when the user confirms a list of reservations\n (i.e. submits them).\n\n Note how this is not a IReservationBaseEvent because it contains\n _multiple_ reservations, not just one.\n\n \"\"\"\n reservations = Attribute(\"The list of reservations the user confirmed\")\n language = Attribute(\"language of the site or current request\")\n\n\nclass IReservationSlotsCreatedEvent(IReservationBaseEvent):\n \"\"\"Event triggered when all reservations slots have been created.\"\"\"\n\n\nclass IReservationSlotsRemovedEvent(IReservationBaseEvent):\n \"\"\"Event triggered when reservation slots are removed.\"\"\"\n\n dates = Attribute(\"The concerned dates\")\n\n\nclass IReservationSlotsUpdatedEvent(IReservationBaseEvent):\n \"\"\"Triggered when reserved slots for a reservation are updated.\"\"\"\n\n\nclass IReservationUpdatedEvent(IReservationBaseEvent):\n \"\"\"Triggered when a reservation is updated.\"\"\"\n\n old_data = Attribute(\"Old reservation data\")\n time_changed = Attribute(\"Boolean indicating whether reservation time \"\n \"has changed\")\n\n\nclass INotificationMailHandler(Interface):\n\n def __init__(request):\n pass\n\n def on_reservations_confirmed(event):\n pass\n\n def on_reservation_approved(event):\n pass\n\n def on_reservation_denied(event):\n pass\n\n def on_reservation_revoked(event):\n pass\n\n def on_reservation_updated(event):\n pass\n", "sub_path": "seantis/reservation/interfaces.py", "file_name": "interfaces.py", "file_ext": "py", "file_size_in_byte": 28801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pkg_resources.get_distribution", "line_number": 37, "usage_type": "call"}, {"api_name": "pkg_resources.DistributionNotFound", "line_number": 40, "usage_type": "attribute"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 47, "usage_type": "name"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 49, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 51, "usage_type": "call"}, {"api_name": "dateutil.rrule.MO", "line_number": 51, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 51, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 51, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 52, "usage_type": "call"}, {"api_name": "dateutil.rrule.TU", "line_number": 52, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 52, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 52, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 53, "usage_type": "call"}, {"api_name": "dateutil.rrule.WE", "line_number": 53, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 53, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 53, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 54, "usage_type": "call"}, {"api_name": "dateutil.rrule.TH", "line_number": 54, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 54, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 54, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 55, "usage_type": "call"}, 
{"api_name": "dateutil.rrule.FR", "line_number": 55, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 55, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 55, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 56, "usage_type": "call"}, {"api_name": "dateutil.rrule.SA", "line_number": 56, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 56, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 56, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 57, "usage_type": "call"}, {"api_name": "dateutil.rrule.SU", "line_number": 57, "usage_type": "attribute"}, {"api_name": "dateutil.rrule", "line_number": 57, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 57, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 61, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 63, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 63, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 64, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 64, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 68, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 70, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 70, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 71, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 71, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 72, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 72, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 79, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 81, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 81, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 82, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 82, "usage_type": "call"}, {"api_name": "zope.interface.Invalid", "line_number": 89, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 90, "usage_type": "call"}, {"api_name": "zope.component.getAllUtilitiesRegisteredFor", "line_number": 106, "usage_type": "call"}, {"api_name": "plone.dexterity.interfaces.IDexterityFTI", "line_number": 106, "usage_type": "argument"}, {"api_name": "zope.component.hooks.getSite", "line_number": 108, "usage_type": "call"}, {"api_name": "zope.i18n.translate", "line_number": 111, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 112, "usage_type": "call"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 114, "usage_type": "call"}, {"api_name": "five.grok.provider", "line_number": 95, "usage_type": "call"}, {"api_name": "zope.schema.interfaces.IContextSourceBinder", "line_number": 95, "usage_type": "argument"}, {"api_name": "five.grok", "line_number": 95, "usage_type": "name"}, {"api_name": "zope.schema.vocabulary.SimpleTerm", "line_number": 120, "usage_type": "call"}, {"api_name": "seantis.reservation.utils._languagelist.items", "line_number": 122, "usage_type": 
"call"}, {"api_name": "seantis.reservation.utils._languagelist", "line_number": 122, "usage_type": "name"}, {"api_name": "zope.schema.vocabulary.SimpleVocabulary", "line_number": 124, "usage_type": "call"}, {"api_name": "five.grok.provider", "line_number": 117, "usage_type": "call"}, {"api_name": "zope.schema.interfaces.IContextSourceBinder", "line_number": 117, "usage_type": "argument"}, {"api_name": "five.grok", "line_number": 117, "usage_type": "name"}, {"api_name": "Products.CMFDefault.utils.checkEmailAddress", "line_number": 131, "usage_type": "call"}, {"api_name": "Products.CMFDefault.exceptions.EmailAddressInvalid", "line_number": 132, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 133, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 133, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 137, "usage_type": "attribute"}, {"api_name": "zope.schema", "line_number": 137, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 140, "usage_type": "attribute"}, {"api_name": "zope.schema", "line_number": 140, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 143, "usage_type": "attribute"}, {"api_name": "zope.schema", "line_number": 143, "usage_type": "name"}, {"api_name": "plone.schemaeditor.fields.FieldFactory", "line_number": 148, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 148, "usage_type": "call"}, {"api_name": "plone.supermodel.exportimport.BaseHandler", "line_number": 151, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 154, "usage_type": "name"}, {"api_name": "plone.app.viewletmanager.manager.BaseOrderedViewletManager", "line_number": 220, "usage_type": "name"}, {"api_name": "five.grok.ViewletManager", "line_number": 220, "usage_type": "attribute"}, {"api_name": "five.grok", "line_number": 220, "usage_type": "name"}, {"api_name": "five.grok.context", "line_number": 222, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 222, "usage_type": "argument"}, {"api_name": "five.grok", "line_number": 222, "usage_type": "name"}, {"api_name": "five.grok.name", "line_number": 223, "usage_type": "call"}, {"api_name": "five.grok", "line_number": 223, "usage_type": "name"}, {"api_name": "seantis.reservation.utils.is_uuid", "line_number": 236, "usage_type": "call"}, {"api_name": "seantis.reservation.utils", "line_number": 236, "usage_type": "name"}, {"api_name": "seantis.reservation.utils.string_uuid", "line_number": 237, "usage_type": "call"}, {"api_name": "seantis.reservation.utils", "line_number": 237, "usage_type": "name"}, {"api_name": "seantis.reservation.utils.cached_property", "line_number": 225, "usage_type": "attribute"}, {"api_name": "seantis.reservation.utils", "line_number": 225, "usage_type": "name"}, {"api_name": "zope.interface.Interface", "line_number": 264, "usage_type": "name"}, {"api_name": "plone.directives.form.Schema", "line_number": 272, "usage_type": "attribute"}, {"api_name": "plone.directives.form", "line_number": 272, "usage_type": "name"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 274, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 274, "usage_type": "name"}, {"api_name": "zope.schema.Int", "line_number": 275, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 275, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 276, 
"usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 277, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 285, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 285, "usage_type": "name"}, {"api_name": "zope.schema.Int", "line_number": 286, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 286, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 287, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 288, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 295, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 295, "usage_type": "name"}, {"api_name": "zope.schema.Bool", "line_number": 296, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 296, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 297, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 298, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 306, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 306, "usage_type": "name"}, {"api_name": "zope.schema.Bool", "line_number": 307, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 307, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 308, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 309, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 317, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 317, "usage_type": "name"}, {"api_name": "zope.schema.Choice", "line_number": 318, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 318, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 319, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 320, "usage_type": "call"}, {"api_name": "seantis.reservation.raster.VALID_RASTER_VALUES", "line_number": 326, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 333, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 333, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 330, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 338, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 339, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 335, "usage_type": "name"}, {"api_name": "zope.schema.Bool", "line_number": 343, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 343, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 344, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 353, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 353, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 354, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 357, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 357, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 358, "usage_type": "call"}, {"api_name": 
"plone.multilingualbehavior.directives.languageindependent", "line_number": 362, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 362, "usage_type": "name"}, {"api_name": "zope.schema.Int", "line_number": 363, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 363, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 364, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 365, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 374, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 374, "usage_type": "name"}, {"api_name": "zope.schema.Int", "line_number": 375, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 375, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 376, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 377, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 386, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 386, "usage_type": "name"}, {"api_name": "zope.schema.List", "line_number": 387, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 387, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 388, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 389, "usage_type": "call"}, {"api_name": "zope.schema.Choice", "line_number": 390, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 390, "usage_type": "name"}, {"api_name": "plone.directives.form.widget", "line_number": 397, "usage_type": "call"}, {"api_name": "plone.directives.form", "line_number": 397, "usage_type": "name"}, {"api_name": "z3c.form.browser.checkbox.CheckBoxFieldWidget", "line_number": 397, "usage_type": "name"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 399, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 399, "usage_type": "name"}, {"api_name": "zope.schema.Choice", "line_number": 400, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 400, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 401, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 402, "usage_type": "call"}, {"api_name": "plone.directives.form.widget", "line_number": 407, "usage_type": "call"}, {"api_name": "plone.directives.form", "line_number": 407, "usage_type": "name"}, {"api_name": "z3c.form.browser.radio.RadioFieldWidget", "line_number": 407, "usage_type": "name"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 409, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 409, "usage_type": "name"}, {"api_name": "zope.schema.Choice", "line_number": 410, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 410, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 411, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 412, "usage_type": "call"}, {"api_name": "plone.directives.form.widget", "line_number": 417, "usage_type": "call"}, {"api_name": "plone.directives.form", "line_number": 417, "usage_type": "name"}, {"api_name": "z3c.form.browser.radio.RadioFieldWidget", "line_number": 417, "usage_type": 
"name"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 419, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 419, "usage_type": "name"}, {"api_name": "zope.schema.Date", "line_number": 420, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 420, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 421, "usage_type": "call"}, {"api_name": "plone.directives.form.fieldset", "line_number": 425, "usage_type": "call"}, {"api_name": "plone.directives.form", "line_number": 425, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 427, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives.languageindependent", "line_number": 434, "usage_type": "call"}, {"api_name": "plone.multilingualbehavior.directives", "line_number": 434, "usage_type": "name"}, {"api_name": "zope.schema.List", "line_number": 435, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 435, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 436, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 437, "usage_type": "call"}, {"api_name": "zope.schema.Choice", "line_number": 441, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 441, "usage_type": "name"}, {"api_name": "plone.directives.form.widget", "line_number": 447, "usage_type": "call"}, {"api_name": "plone.directives.form", "line_number": 447, "usage_type": "name"}, {"api_name": "z3c.form.browser.checkbox.CheckBoxFieldWidget", "line_number": 447, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 455, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 455, "usage_type": "call"}, {"api_name": "zope.interface.Invalid", "line_number": 458, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 458, "usage_type": "call"}, {"api_name": "zope.interface.Invalid", "line_number": 461, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 462, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 449, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 468, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 469, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 465, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 476, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 477, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 473, "usage_type": "name"}, {"api_name": "zope.schema.interfaces.ITime", "line_number": 491, "usage_type": "name"}, {"api_name": "zope.schema.Time", "line_number": 495, "usage_type": "attribute"}, {"api_name": "zope.schema", "line_number": 495, "usage_type": "name"}, {"api_name": "zope.interface.declarations.classImplements", "line_number": 499, "usage_type": "call"}, {"api_name": "zope.schema.Int", "line_number": 505, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 505, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 506, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 511, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 511, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 512, "usage_type": "call"}, {"api_name": "zope.schema.Text", 
"line_number": 517, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 517, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 518, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 524, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 525, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 533, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 534, "usage_type": "call"}, {"api_name": "zope.schema.Bool", "line_number": 543, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 543, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 544, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 545, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 552, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 552, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 553, "usage_type": "call"}, {"api_name": "zope.schema.Date", "line_number": 557, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 557, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 558, "usage_type": "call"}, {"api_name": "zope.schema.List", "line_number": 561, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 561, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 562, "usage_type": "call"}, {"api_name": "zope.schema.Choice", "line_number": 563, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 563, "usage_type": "name"}, {"api_name": "zope.schema.Bool", "line_number": 567, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 567, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 568, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 569, "usage_type": "call"}, {"api_name": "seantis.reservation.utils.get_date_range", "line_number": 582, "usage_type": "call"}, {"api_name": "seantis.reservation.utils", "line_number": 582, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 588, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 588, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 577, "usage_type": "name"}, {"api_name": "zope.interface.Invalid", "line_number": 594, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 594, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 590, "usage_type": "name"}, {"api_name": "plone.directives.form.Schema", "line_number": 600, "usage_type": "attribute"}, {"api_name": "plone.directives.form", "line_number": 600, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 603, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 603, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 604, "usage_type": "call"}, {"api_name": "zope.schema.Date", "line_number": 607, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 607, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 608, "usage_type": "call"}, {"api_name": "zope.schema.Date", "line_number": 611, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 611, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 612, "usage_type": "call"}, {"api_name": "zope.interface.Invalid", 
"line_number": 618, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 618, "usage_type": "call"}, {"api_name": "zope.interface.invariant", "line_number": 615, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 620, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 631, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 635, "usage_type": "call"}, {"api_name": "plone.directives.form.Schema", "line_number": 642, "usage_type": "attribute"}, {"api_name": "plone.directives.form", "line_number": 642, "usage_type": "name"}, {"api_name": "zope.schema.Choice", "line_number": 645, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 645, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 646, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 650, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 650, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 651, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 652, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 655, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 658, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 658, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 659, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 661, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 664, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 664, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 665, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 666, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 670, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 673, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 673, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 674, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 676, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 679, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 679, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 680, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 681, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 685, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 688, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 688, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 689, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 691, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 694, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 694, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 695, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 696, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 698, "usage_type": "name"}, {"api_name": "zope.schema.Text", 
"line_number": 701, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 701, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 702, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 704, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 707, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 707, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 708, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 709, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 711, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 714, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 714, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 715, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 717, "usage_type": "name"}, {"api_name": "zope.schema.TextLine", "line_number": 720, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 720, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 721, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 722, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 724, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 727, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 727, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 728, "usage_type": "call"}, {"api_name": "seantis.reservation.mail_templates.templates", "line_number": 730, "usage_type": "name"}, {"api_name": "seantis.reservation.utils.get_current_site_language", "line_number": 735, "usage_type": "call"}, {"api_name": "seantis.reservation.utils", "line_number": 735, "usage_type": "name"}, {"api_name": "z3c.form.widget.ComputedWidgetAttribute", "line_number": 737, "usage_type": "call"}, {"api_name": "z3c.form.widget", "line_number": 737, "usage_type": "name"}, {"api_name": "zope.interface.Interface", "line_number": 742, "usage_type": "name"}, {"api_name": "zope.schema.Int", "line_number": 745, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 745, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 746, "usage_type": "call"}, {"api_name": "zope.schema.Date", "line_number": 751, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 751, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 752, "usage_type": "call"}, {"api_name": "zope.schema.Time", "line_number": 756, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 756, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 757, "usage_type": "call"}, {"api_name": "zope.schema.Time", "line_number": 761, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 761, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 762, "usage_type": "call"}, {"api_name": "zope.schema.Int", "line_number": 766, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 766, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 767, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 773, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 777, "usage_type": "call"}, 
{"api_name": "zope.schema", "line_number": 777, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 778, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 779, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 783, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 783, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 784, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 789, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 793, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 793, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 794, "usage_type": "call"}, {"api_name": "zope.schema.Int", "line_number": 803, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 803, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 803, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 807, "usage_type": "name"}, {"api_name": "zope.schema.Text", "line_number": 810, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 810, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 811, "usage_type": "call"}, {"api_name": "zope.schema.Int", "line_number": 815, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 815, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 816, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 822, "usage_type": "call"}, {"api_name": "zope.schema.Bool", "line_number": 830, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 830, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 831, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 832, "usage_type": "call"}, {"api_name": "zope.schema.Text", "line_number": 838, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 838, "usage_type": "name"}, {"api_name": "seantis.reservation._", "line_number": 839, "usage_type": "call"}, {"api_name": "seantis.reservation._", "line_number": 840, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 849, "usage_type": "name"}, {"api_name": "zope.interface.Attribute", "line_number": 856, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 859, "usage_type": "name"}, {"api_name": "zope.interface.Attribute", "line_number": 862, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 863, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 884, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 892, "usage_type": "name"}, {"api_name": "zope.interface.Attribute", "line_number": 900, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 901, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 911, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 921, "usage_type": "call"}, {"api_name": "zope.interface.Attribute", "line_number": 922, "usage_type": "call"}, {"api_name": "zope.interface.Interface", "line_number": 926, "usage_type": "name"}]} +{"seq_id": "648304712", "text": "from __future__ import division\nfrom GraphManager import GraphManager\nfrom Similarity import Similarity\nimport cPickle as pickle\nimport graph_tool.all as gt\nimport numpy as np\nimport 
scipy.sparse as ss\nimport scipy.sparse.linalg as ssla\nimport time\n\n\nif __name__ == '__main__':\n\n np.random.seed(42)\n\n gm = GraphManager()\n g = gm.create_graph('csx_graph.gt')\n A = gt.adjacency(g)\n A = A - ss.diags(A.diagonal())\n A_tril = ss.csr_matrix(ss.tril(A))\n S_tril = ss.csr_matrix(A_tril.shape)\n print('created adjacency matrix')\n\n Sim = Similarity()\n print('loading feature matrix')\n X = ss.load_npz('feature_matrix.npz')\n with open('labels.pkl', 'rb') as f:\n y = pickle.load(f)\n print('feature matrix and labels loaded')\n\n print('creating index mapping')\n yint = np.array([int(i) for i in y])\n u, indices = np.unique(yint, return_index=True)\n u_map = {u[i]: indices[i] for i in xrange(u.shape[0])}\n\n print('starting similarities')\n rows, cols = A_tril.nonzero()\n total = rows.shape[0]\n idx = 0\n t0 = time.time()\n for row, col in zip(rows, cols):\n label_x = g.vp['id'][row]\n label_y = g.vp['id'][col]\n label_idx_x = u_map[int(label_x)]\n label_idx_y = u_map[int(label_y)]\n S_tril[row, col] = \\\n Sim.jaccard_quick(X[label_idx_x, :], X[label_idx_y, :])\n if idx % 1000 == 0:\n s = 'time elapsed: {} | idx: {} / {}'\n print(s.format(time.time() - t0, idx, total))\n t0 = time.time()\n if idx == 250000:\n break\n idx += 1\n\n print('saving first {}'.format(idx))\n ss.save_npz('similarity_matrix_tril.npz', S_tril)\n", "sub_path": "scripts/compute_similarities.py", "file_name": "compute_similarities.py", "file_ext": "py", "file_size_in_byte": 1689, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.random.seed", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "GraphManager.GraphManager", "line_number": 16, "usage_type": "call"}, {"api_name": "graph_tool.all.adjacency", "line_number": 18, "usage_type": "call"}, {"api_name": "graph_tool.all", "line_number": 18, "usage_type": "name"}, {"api_name": "scipy.sparse.diags", "line_number": 19, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 19, "usage_type": "name"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 20, "usage_type": "name"}, {"api_name": "scipy.sparse.tril", "line_number": 20, "usage_type": "call"}, {"api_name": "scipy.sparse.csr_matrix", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 21, "usage_type": "name"}, {"api_name": "Similarity.Similarity", "line_number": 24, "usage_type": "call"}, {"api_name": "scipy.sparse.load_npz", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 26, "usage_type": "name"}, {"api_name": "cPickle.load", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 33, "usage_type": "call"}, {"api_name": "time.time", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 50, "usage_type": "call"}, {"api_name": "time.time", "line_number": 51, "usage_type": "call"}, {"api_name": "scipy.sparse.save_npz", "line_number": 57, "usage_type": "call"}, {"api_name": "scipy.sparse", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "651502459", "text": "import setup\nimport machine\nimport time\nimport serial\n\n\nclass Tasks:\n def __init__(self):\n print(\"started tasks\")\n self.pins = setup.Pins()\n # hardware interface configuration\n 
self.stim1Pin = self.pins.dout25\n        self.stim2Pin = self.pins.dout26\n        self.trigger = self.pins.dout32\n        self.ser = serial.Serial()\n        # physical button to start running task\n        self.runButton = self.pins.din12\n\n        ##task parameters\n        ## number of trials\n        self.numTrials = 10\n        ## interval between trials (in milliseconds)\n        self.iti = 1000\n        ## interval between stimulus (in milliseconds)\n        self.isi = 500\n        ## stimulus1 duration (in milliseconds)\n        self.stim1Dur = 50\n        ## stimulus2 duration (in milliseconds)\n        self.stim2Dur = 100\n\n    def interval(self, duration=100, funcs=None):\n\n        # little excerpt to be expanded later\n        # the idea is to pass an object with functions that can be executed\n        # in the timing part of this routine\n        # try:\n        #     len(funcs)\n        #     funcsLen = 0\n        # except:\n        #     funcsLen = len(funcs)\n\n        duration = duration * 1000 # move duration to microseconds\n        startTiming = time.ticks_us() # get microsecond counter\n        stopTiming = time.ticks_us() # get microsecond counter\n        while stopTiming - startTiming < duration:\n            \n            stopTiming = time.ticks_us()\n\n\n    def task1(self):\n        for i in range(self.numTrials):\n            # start with an ITI\n            self.interval(duration=self.iti)\n            # send a trigger signal and stimulus 1 signal, then run the interval\n            self.trigger.value(1)\n            self.stim1Pin.value(1)\n            self.interval(duration=self.stim1Dur)\n            self.stim1Pin.value(0)\n            self.trigger.value(0)\n            # wait for the interstimulus interval\n            self.interval(duration=self.isi)\n            # do the same for stimulus 2\n            self.trigger.value(1)\n            self.stim2Pin.value(1)\n            self.interval(duration=self.stim2Dur)\n            self.stim2Pin.value(0)\n            self.trigger.value(0)\n            # wait for the interstimulus interval\n            self.interval(duration=self.isi)\n\n    def startTasks(self):\n\n        ran = 0\n        while ran < 50:\n            # test to see what is the status of the signal button\n            self.startSignal = self.runButton.value()\n            \n            ran = ran+1\n            ##print(\"ran: \"+ str(ran))\n\n            serialSignal = self.ser.readDataPoll(timeout=100)\n            ##print(\"raw: \")\n            ##print(serialSignal)\n\n            serialSignal = serialSignal.decode()\n            \n            ##print(serialSignal[0])\n\n            print(serialSignal[0]==\"A\")\n            print(serialSignal[0]==\"a\")\n\n            # if the button was pressed, or if a signal came from the serial port, start the task\n            if self.startSignal == 1:\n                self.task1()\n                print(\"task 1 completed\")\n            if serialSignal[0] == \"A\":\n                self.task1()\n                print(\"task A completed\")\n            if serialSignal[0] == \"X\":\n                ran = 50\n                print(\"exiting\")\n            \n        return\n", "sub_path": "software/tasks.py", "file_name": "tasks.py", "file_ext": "py", "file_size_in_byte": 3319, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "setup.Pins", "line_number": 10, "usage_type": "call"}, {"api_name": "serial.Serial", "line_number": 15, "usage_type": "call"}, {"api_name": "time.ticks_us", "line_number": 43, "usage_type": "call"}, {"api_name": "time.ticks_us", "line_number": 44, "usage_type": "call"}, {"api_name": "time.ticks_us", "line_number": 47, "usage_type": "call"}]} +{"seq_id": "492939389", "text": "import csv\nimport urllib\n\nfrom django.contrib.admin import utils\nfrom django.utils.text import capfirst\nfrom django.http import HttpResponse\nfrom django.utils.html import format_html\nfrom django.core.urlresolvers import reverse\nfrom django.utils.safestring import SafeText\n\n\nclass Link(object):\n    def __init__(self, url, value):\n        self.url = url\n        self.value = value\n\n    def __unicode__(self):\n        return SafeText(format_html(\n            '<a href=\"{url}\">{value}</a>', url=self.url, 
value=self.value))\n\n\ndef link_url(route, **params):\n url = reverse(route)\n if params:\n url += '?' + urllib.urlencode(sorted(params.items()))\n return url\n\n\ndef link(value, route, **params):\n url = link_url(route, **params)\n return Link(url, value)\n\n\ndef display_field(name, model, model_admin):\n empty_value_display = model_admin.get_empty_value_display()\n field, _, value = utils.lookup_field(name, model, model_admin)\n\n if isinstance(value, Link):\n value = value.value\n\n if isinstance(value, bool):\n return value\n else:\n return utils.display_for_field(value, field, empty_value_display)\n\n\ndef get_csv_headings(model_admin, request, queryset, fields):\n return [\n capfirst(utils.label_for_field(name, queryset.model, model_admin))\n for name in fields\n ]\n\n\ndef get_csv_row(model, model_admin, request, queryset, fields):\n row = []\n\n for name in fields:\n value = display_field(name, model, model_admin)\n row.append(unicode(value).encode('utf-8'))\n\n return row\n\n\ndef get_csv_filename(model_admin, request, queryset):\n return '%s.csv' % queryset.model._meta.verbose_name_plural\n\n\ndef get_fields(model_admin, request, queryset, exclude):\n exclude = set(exclude)\n return [\n field for field in model_admin.list_display\n if field not in exclude]\n\n\ndef define_download_as_csv(exclude=()):\n def download_as_csv_fn(model_admin, request, queryset):\n fields = get_fields(model_admin, request, queryset, exclude)\n filename = get_csv_filename(model_admin, request, queryset)\n\n response = HttpResponse(content_type='text/csv; charset=utf-8')\n response['Content-Disposition'] = 'attachment;filename=%s' % filename\n writer = csv.writer(response)\n\n writer.writerow(\n get_csv_headings(model_admin, request, queryset, fields))\n\n for model in queryset:\n writer.writerow(\n get_csv_row(model, model_admin, request, queryset, fields))\n\n return response\n\n download_as_csv_fn.short_description = \"Download selected as csv\"\n return download_as_csv_fn\n\n\ndownload_as_csv = define_download_as_csv()\n", "sub_path": "mentorship/mentor/admin_utils.py", "file_name": "admin_utils.py", "file_ext": "py", "file_size_in_byte": 2696, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.utils.safestring.SafeText", "line_number": 18, "usage_type": "call"}, {"api_name": "django.utils.html.format_html", "line_number": 18, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 23, "usage_type": "call"}, {"api_name": "urllib.urlencode", "line_number": 25, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.lookup_field", "line_number": 36, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils", "line_number": 36, "usage_type": "name"}, {"api_name": "django.contrib.admin.utils.display_for_field", "line_number": 44, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils", "line_number": 44, "usage_type": "name"}, {"api_name": "django.utils.text.capfirst", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils.label_for_field", "line_number": 49, "usage_type": "call"}, {"api_name": "django.contrib.admin.utils", "line_number": 49, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 80, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "184759427", "text": "import numpy as np\nfrom astropy.modeling.models import Gaussian2D\nfrom 
astropy.modeling.models import AiryDisk2D\n\nfrom holopy.utils.transferfunctions import otf\n\nclass Apodizer(object):\n\n    def __init__(self, function, shape, **kwargs):\n\n        if not isinstance(shape, tuple):\n            if isinstance(shape, int):\n                shape = (shape, shape)\n        self.shape = shape\n        self.center = ((self.shape[0] + 1) / 2, (self.shape[1] + 1) / 2)\n\n        if function == 'Gaussian':\n            if 'radius' in kwargs:\n                # Copying the radius keyword argument into the proper Gaussian2D keywords\n                kwargs['x_stddev'] = kwargs['radius']\n                kwargs['y_stddev'] = kwargs['radius']\n                del kwargs['radius']\n            self.model = Gaussian2D(x_mean=self.center[0], y_mean=self.center[1], **kwargs)\n        elif function == 'Airy':\n            self.model = AiryDisk2D(x_0=self.center[0], y_0=self.center[1], **kwargs)\n        else:\n            raise ValueError(\"Function value <{}> for Apodizer class is not recognized!\".format(function))\n\n\n    def __call__(self):\n        y, x = np.mgrid[0:self.shape[0], 0:self.shape[1]]\n        return self.model(x, y)\n\n\n    def apodize(self, object):\n        y, x = np.mgrid[0:self.shape[0], 0:self.shape[1]]\n        apodization_psf = self.model(x, y)\n        apodization_otf = otf(apodization_psf)\n        return np.multiply(object, apodization_otf)\n        # tmp = np.multiply(object, apodization_otf)\n        # return otf(tmp, inverse=True)\n", "sub_path": "specklepy/deprecated/apodizer.py", "file_name": "apodizer.py", "file_ext": "py", "file_size_in_byte": 1538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "astropy.modeling.models.Gaussian2D", "line_number": 23, "usage_type": "call"}, {"api_name": "astropy.modeling.models.AiryDisk2D", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.mgrid", "line_number": 31, "usage_type": "attribute"}, {"api_name": "numpy.mgrid", "line_number": 36, "usage_type": "attribute"}, {"api_name": "holopy.utils.transferfunctions.otf", "line_number": 38, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "559858324", "text": "#!/usr/bin/python3\nfrom flask import Flask #https://flask.palletsprojects.com/\nfrom flask import send_from_directory #Documentation for the flask library\nfrom flask import jsonify\nfrom flask import Response\nfrom flask import request\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import session\nfrom flask_caching import Cache\nimport db.connection as connection\nimport db.classinfo as ClassInfo\nimport db.courses as Courses\nimport db.semester_info as SemesterInfo\nimport db.semester_date_mapping as DateMapping\nimport db.admin as AdminInfo\nimport db.student_course_selection as CourseSelect\nimport db.user as UserModel\nimport controller.user as user_controller\nimport controller.session as session_controller\nimport controller.userevent as event_controller\nfrom io import StringIO\nimport json\nimport os\nimport pandas as pd\nfrom constants import Constants\n\"\"\"\nNOTE: on caching\non add of semester or change of data from GET\ndo a cache.clear() to ensure data integrity\n\"\"\"\ncache = Cache(config={'CACHE_TYPE': 'simple'})\n\napp = Flask(__name__)\napp.secret_key = os.environ.get(\"FLASK_SIGN_KEY\", \"localTestingKey\")\ncache.init_app(app)\n\n# - init interfaces to db\ndb_conn = connection.db\nclass_info = ClassInfo.ClassInfo(db_conn)\ncourses = Courses.Courses(db_conn, cache)\ndate_range_map = DateMapping.semester_date_mapping(db_conn)\nadmin_info = AdminInfo.Admin(db_conn)\ncourse_select = CourseSelect.student_course_selection(db_conn)\nsemester_info = 
SemesterInfo.semester_info(db_conn)\nusers = UserModel.User()\n\ndef is_admin_user():\n    if 'user' in session and (session['user']['admin'] or session['user']['super_admin']):\n        return True\n    return False\n\n@app.route('/')\n@cache.cached(timeout=Constants.HOUR_IN_SECONDS)\ndef root():\n    return \"YACS API is Up!\"\n\n@app.route('/api/')\ndef apiroot():\n    return \"wow\"\n\n# - data routes\n\n@app.route('/api/class', methods=['GET'])\n@cache.cached(timeout=Constants.HOUR_IN_SECONDS, query_string=True)\ndef get_classes():\n    \"\"\"\n    GET /api/class?semester={}&search={}\n    Cached: 1 Hour\n    \"\"\"\n    semester = request.args.get(\"semester\", default=None)\n    search = request.args.get(\"search\", default=None)\n    if semester:\n        if not semester_info.is_public(semester):\n            if is_admin_user():\n                classes, error = class_info.get_classes_full(semester)\n                return jsonify(classes) if not error else Response(error, status=500)\n            return Response(\"Semester isn't available\", status=401)\n        if search is not None:\n            classes, error = class_info.get_classes_by_search(semester, search)\n        else:\n            classes, error = class_info.get_classes_full(semester)\n        return jsonify(classes) if not error else Response(error, status=500)\n    return Response(\"missing semester option\", status=400)\n@app.route('/api/department', methods=['GET'])\n@cache.cached(timeout=Constants.HOUR_IN_SECONDS)\ndef get_departments():\n    \"\"\"\n    GET /api/department\n    Cached: 1 Hour\n\n    List of departments i.e. COGS, CIVL, CSCI, BIOL\n    \"\"\"\n    departments, error = class_info.get_departments()\n    return jsonify(departments) if not error else Response(error, status=500)\n\n@app.route('/api/subsemester', methods=['GET'])\n@cache.cached(timeout=Constants.HOUR_IN_SECONDS, query_string=True)\ndef get_subsemesters():\n    \"\"\"\n    GET /api/subsemester?semester={}\n    Cached: 1 Hour\n\n    Get list of subsemesters for the given semester\n
    (Used in dropdown in \"Course Search\")\n    \"\"\"\n    semester = request.args.get(\"semester\", default=None)\n    if semester:\n        subsemesters, error = class_info.get_subsemesters(semester)\n        return jsonify(subsemesters) if not error else Response(error, status=500)\n    # In some cases, we do want all subsemesters across all semesters, like in the Admin Panel\n    subsemesters, error = class_info.get_subsemesters()\n    return jsonify(subsemesters) if not error else Response(error, status=500)\n\n@app.route('/api/semester', methods=['GET'])\n@cache.cached(timeout=Constants.DAY_IN_SECONDS)\ndef get_semesters():\n    \"\"\"\n    GET /api/semester\n    Cached: 24 Hours\n    \"\"\"\n    semesters, error = class_info.get_semesters()\n    return jsonify(semesters) if not error else Response(error, status=500)\n\n@app.route('/api/semesterInfo', methods=['GET'])\ndef get_all_semester_info():\n    all_semester_info, error = class_info.get_all_semester_info()\n    return jsonify(all_semester_info) if not error else Response(error, status=500)\n\n@app.route('/api/defaultsemester', methods=['GET'])\ndef get_defaultSemester():\n    semester, error = admin_info.get_semester_default()\n    return jsonify(semester) if not error else Response(error, status=500)\n\n@app.route('/api/defaultsemesterset', methods=['POST'])\ndef set_defaultSemester():\n    info = request.get_json()\n    success, error = admin_info.set_semester_default(info['default'])\n    if success:\n        return Response(status=200)\n    else:\n        print(error)\n        return Response(error.__str__(), status=500)\n\n#Parses the data from the .csv data files\n@app.route('/api/bulkCourseUpload', methods=['POST'])\ndef uploadHandler():\n    # check for user files\n    if not request.files:\n        return Response(\"No file received\", 400)\n    if request.files['file'].filename.rsplit('.', 1)[1].lower() != 'csv':\n        return Response(\"File must have csv extension\", 400)\n    # get file\n    csv_file = StringIO(request.files['file'].read().decode())\n    # update semester infos based on isPubliclyVisible, hiding semester if needed\n    is_publicly_visible = request.form.get(\"isPubliclyVisible\", default=False)\n    semesters = pd.read_csv(csv_file)['semester'].unique()\n    for semester in semesters:\n        semester_info.upsert(semester, is_publicly_visible)\n    # Like C, the cursor will be at EOF after full read, so reset to beginning\n    csv_file.seek(0)\n    # Clear out course data of the same semester before population due to\n    # data source (E.g. SIS & Acalog Catalog) possibly updating/removing/adding\n    # courses.\n    courses.bulk_delete(semesters=semesters)\n    # Populate DB from CSV\n    isSuccess, error = courses.populate_from_csv(csv_file)\n    if (isSuccess):\n        return Response(status=200)\n    else:\n        print(error)\n        return Response(error.__str__(), status=500)\n\n@app.route('/api/mapDateRangeToSemesterPart', methods=['POST'])\ndef map_date_range_to_semester_part_handler():\n    # This depends on date_start, date_end, and semester_part_name being\n    # ordered since each field has multiple entries. They should be ordered\n    # as each dict entry has the value of list. But if it doesn't work,\n    # look into request.parameter_storage_class which will change the default\n    # ImmutableMultiDict class that request.form uses. 
https://flask.palletsprojects.com/en/1.0.x/patterns/subclassing/\n    if (request.form):\n        # If checkbox is false, then, by design, it is not included in the form data.\n        is_publicly_visible = request.form.get('isPubliclyVisible', default=False)\n        semester_title = request.form.get('semesterTitle')\n        semester_part_names = request.form.getlist('semester_part_name')\n        start_dates = request.form.getlist('date_start')\n        end_dates = request.form.getlist('date_end')\n        if (start_dates and end_dates and semester_part_names and is_publicly_visible is not None and semester_title):\n            _, error = date_range_map.insert_all(start_dates, end_dates, semester_part_names)\n            semester_info.upsert(semester_title, is_publicly_visible)\n            if (not error):\n                return Response(status=200)\n            else:\n                return Response(error, status=500)\n    return Response(\"Did not receive proper form data\", status=500)\n\n\n# - user system api\n@app.route('/api/user/<session_id>', methods=['GET'])\ndef get_user_info(session_id):\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    return user_controller.get_user_info(session_id)\n\n\n@app.route('/api/user', methods=['POST'])\ndef add_user():\n    return user_controller.add_user(request.json)\n\n\n@app.route('/api/user', methods=['DELETE'])\ndef delete_user():\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    return user_controller.delete_user(request.json)\n\n\n@app.route('/api/user', methods=['PUT'])\ndef update_user_info():\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    return user_controller.update_user(request.json)\n\n\n@app.route('/api/session', methods=['POST'])\ndef log_in():\n    session_res = session_controller.add_session(request.json).json\n    if (session_res['success']):\n        session_data = session_res['content']\n        # [0] b/c conn.exec uses fetchall() which wraps result in list\n        user = users.get_user(uid=session_data['uid'])[0]\n        session['user'] = user\n        # https://flask.palletsprojects.com/en/1.1.x/api/?highlight=session#flask.session.permanent\n        session.permanent = False\n    return session_res\n\n\n@app.route('/api/session', methods=['DELETE'])\ndef log_out():\n    response = session_controller.delete_session(request.json)\n\n    if response.get_json()['success']:\n        session.pop('user', None)\n\n    return response\n\n\n@app.route('/api/event', methods=['POST'])\ndef add_user_event():\n    return event_controller.add_event(json.loads(request.data))\n\n@app.route('/api/user/course', methods=['POST'])\ndef add_student_course():\n    info = request.get_json()\n\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    resp, error = course_select.add_selection(info['name'], info['semester'], session['user']['user_id'], info['cid'])\n    return Response(status=200) if not error else Response(error, status=500)\n\n\n@app.route('/api/user/course', methods=['DELETE'])\ndef remove_student_course():\n    info = request.json\n\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    resp, error = course_select.remove_selection(info['name'], info['semester'], session['user']['user_id'], info['cid'])\n    return Response(status=200) if not error else Response(error, status=500)\n\n@app.route('/api/user/course', methods=['GET'])\ndef get_student_courses():\n    if 'user' not in session:\n        return Response(\"Not authorized\", status=403)\n\n    courses, error = course_select.get_selection(session['user']['user_id'])\n    return jsonify(courses) if not error else Response(error, status=500)\n\nif __name__ == 
'__main__':\n    app.run(debug=os.environ.get('DEBUG', 'True') == 'True', host='0.0.0.0', port=5000)\n", "sub_path": "src/api/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 10603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask_caching.Cache", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.Flask", "line_number": 34, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 35, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 35, "usage_type": "attribute"}, {"api_name": "db.connection.db", "line_number": 39, "usage_type": "attribute"}, {"api_name": "db.connection", "line_number": 39, "usage_type": "name"}, {"api_name": "db.classinfo.ClassInfo", "line_number": 40, "usage_type": "call"}, {"api_name": "db.classinfo", "line_number": 40, "usage_type": "name"}, {"api_name": "db.courses.Courses", "line_number": 41, "usage_type": "call"}, {"api_name": "db.courses", "line_number": 41, "usage_type": "name"}, {"api_name": "db.semester_date_mapping.semester_date_mapping", "line_number": 42, "usage_type": "call"}, {"api_name": "db.semester_date_mapping", "line_number": 42, "usage_type": "name"}, {"api_name": "db.admin.Admin", "line_number": 43, "usage_type": "call"}, {"api_name": "db.admin", "line_number": 43, "usage_type": "name"}, {"api_name": "db.student_course_selection.student_course_selection", "line_number": 44, "usage_type": "call"}, {"api_name": "db.student_course_selection", "line_number": 44, "usage_type": "name"}, {"api_name": "db.semester_info.semester_info", "line_number": 45, "usage_type": "call"}, {"api_name": "db.semester_info", "line_number": 45, "usage_type": "name"}, {"api_name": "db.user.User", "line_number": 46, "usage_type": "call"}, {"api_name": "db.user", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 49, "usage_type": "name"}, {"api_name": "constants.Constants.HOUR_IN_SECONDS", "line_number": 54, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 54, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 71, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 71, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.args.get", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 77, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 78, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 83, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 84, "usage_type": "call"}, {"api_name": "constants.Constants.HOUR_IN_SECONDS", "line_number": 65, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 95, "usage_type": "call"}, {"api_name": "constants.Constants.HOUR_IN_SECONDS", "line_number": 86, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 86, "usage_type": "name"}, {"api_name": 
"flask.request.args.get", "line_number": 107, "usage_type": "call"}, {"api_name": "flask.request.args", "line_number": 107, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 107, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 110, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 113, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 113, "usage_type": "call"}, {"api_name": "constants.Constants.HOUR_IN_SECONDS", "line_number": 98, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 98, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 123, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 123, "usage_type": "call"}, {"api_name": "constants.Constants.DAY_IN_SECONDS", "line_number": 116, "usage_type": "attribute"}, {"api_name": "constants.Constants", "line_number": 116, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 128, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 137, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 137, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 140, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 143, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 149, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 149, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 150, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 151, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 151, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 152, "usage_type": "call"}, {"api_name": "io.StringIO", "line_number": 154, "usage_type": "call"}, {"api_name": "flask.request.files", "line_number": 154, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 154, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 156, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 156, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 156, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 157, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 169, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 172, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 181, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 181, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 183, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 183, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 183, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 184, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 184, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 185, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 185, 
"usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 185, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 186, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 186, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 186, "usage_type": "name"}, {"api_name": "flask.request.form.getlist", "line_number": 187, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 187, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 187, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 192, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 201, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 202, "usage_type": "call"}, {"api_name": "controller.user.get_user_info", "line_number": 204, "usage_type": "call"}, {"api_name": "controller.user", "line_number": 204, "usage_type": "name"}, {"api_name": "controller.user.add_user", "line_number": 209, "usage_type": "call"}, {"api_name": "controller.user", "line_number": 209, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 209, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 209, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 214, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 215, "usage_type": "call"}, {"api_name": "controller.user.delete_user", "line_number": 217, "usage_type": "call"}, {"api_name": "controller.user", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 217, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 217, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 222, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 223, "usage_type": "call"}, {"api_name": "controller.user.update_user", "line_number": 225, "usage_type": "call"}, {"api_name": "controller.user", "line_number": 225, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 225, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 225, "usage_type": "name"}, {"api_name": "controller.session.add_session", "line_number": 230, "usage_type": "call"}, {"api_name": "controller.session", "line_number": 230, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 230, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 230, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 235, "usage_type": "name"}, {"api_name": "flask.session.permanent", "line_number": 237, "usage_type": "attribute"}, {"api_name": "flask.session", "line_number": 237, "usage_type": "name"}, {"api_name": "controller.session.delete_session", "line_number": 243, "usage_type": "call"}, {"api_name": "controller.session", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.request.json", "line_number": 243, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 243, "usage_type": "name"}, {"api_name": "flask.session.pop", "line_number": 246, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 246, "usage_type": "name"}, {"api_name": "controller.userevent.add_event", "line_number": 253, "usage_type": "call"}, {"api_name": "controller.userevent", "line_number": 253, 
"usage_type": "name"}, {"api_name": "json.loads", "line_number": 253, "usage_type": "call"}, {"api_name": "flask.request.data", "line_number": 253, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 253, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 257, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 257, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 259, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 260, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 262, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 263, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 268, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 268, "usage_type": "name"}, {"api_name": "flask.session", "line_number": 270, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 271, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 273, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 274, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 278, "usage_type": "name"}, {"api_name": "flask.Response", "line_number": 279, "usage_type": "call"}, {"api_name": "flask.session", "line_number": 281, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 282, "usage_type": "call"}, {"api_name": "flask.Response", "line_number": 282, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 285, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 285, "usage_type": "attribute"}]} +{"seq_id": "384224800", "text": "import logging\nimport re\n\nfrom django import forms\n\nfrom easy_select2.widgets import Select2TextInput\n\nlogger = logging.getLogger('advanced_filters.form_helpers')\n\nextra_spaces_pattern = re.compile('\\s+')\n\n\nclass VaryingTypeCharField(forms.CharField):\n def to_python(self, value):\n res = super(VaryingTypeCharField, self).to_python(value)\n split_res = res.split(\",\")\n if not res or len(split_res) < 2:\n return res.strip()\n # create a regex string out of the list of choices passed, i.e: (a|b)\n res = r\"({})\".format(\"|\".join(map(lambda x: x.strip(), split_res)))\n return res\n\n\nclass CleanWhiteSpacesMixin(object):\n def clean(self):\n \"\"\" Strip char fields \"\"\"\n cleaned_data = super(CleanWhiteSpacesMixin, self).clean()\n for k in self.cleaned_data:\n if isinstance(self.cleaned_data[k], basestring):\n cleaned_data[k] = re.sub(extra_spaces_pattern, ' ',\n self.cleaned_data[k] or '').strip()\n return cleaned_data\n\n\ndef get_select2textinput_widget(choices=None):\n \"\"\"\n Accepts django-style choices (tuple of tuples), prepares\n and returns an instance of a Select2TextInput widget.\n\n For more info on this custom widget, see doc here:\n http://django-easy-select2.readthedocs.org/en/latest/index.html\n \"\"\"\n attributes = {\n # select2 script takes data in json values such as:\n # 'data': [ {'id': 'value', 'text': 'description'}, ... 
],\n }\n if choices:\n attributes['data'] = [{'id': c[0], 'text': unicode(c[1])}\n for c in choices]\n return Select2TextInput(select2attrs=attributes)\n", "sub_path": "advanced_filters/form_helpers.py", "file_name": "form_helpers.py", "file_ext": "py", "file_size_in_byte": 1703, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 10, "usage_type": "call"}, {"api_name": "django.forms.CharField", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.forms", "line_number": 13, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 30, "usage_type": "call"}, {"api_name": "easy_select2.widgets.Select2TextInput", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "190632387", "text": "#!/usr/bin/env python3\nimport json\nimport name_counter\n\nimport gensim\n\nwith open('asoiaf.txt', errors='ignore') as f:\n sentences = [line.strip().split(' ') for line in f.readlines()]\n\n# model = gensim.models.Word2Vec.load('./model')\n\n# if model:\nmodel = gensim.models.Word2Vec(sentences, workers=8, min_count=3, size=150)\n\n# model.save('model')\n# first_names = list(name_counter.first_names)\n# print(first_names)\n\n\nname_dict = name_counter.name_dict\nnames = name_counter.names\nfirst_names = name_counter.first_names\n\n\ndef find_similar(kw):\n # print('kw=', kw)\n result = model.most_similar(positive=kw)\n ret = []\n for k, s in result:\n if len(kw) > 2:\n # print(len(kw))\n a = k[0]\n b = k[1:]\n if k.isalpha() and a.isupper() and b.islower():\n # print('=> ', k)\n first_names.append(k)\n else:\n if k not in first_names:\n continue\n\n for name in names:\n if name_dict[name] == k:\n k = name\n break\n ret.append({k: s})\n # print(k, s)\n # print('')\n return ret\n\n\nfor i in range(15):\n find_similar(first_names)\n\nprint(len(first_names))\nfirst_names = sorted(first_names)\nprint(first_names)\n\nf_n = []\nfirst_name_dict = name_counter.first_name_dict\n\nresult = {}\nfor first_name in first_names:\n res = find_similar([first_name])\n if res:\n f_n.append(first_name)\n if first_name in first_name_dict:\n first_name = first_name_dict[first_name]\n # print(first_name, res)\n result[first_name] = res\n\nfirst_names = f_n\nprint(len(first_names))\n# print(result)\nfamily_dict = name_counter.family_dict\n\njson_obj = []\n\n\ndef get_node_name(name):\n node_name = 'flare'\n last_name = ''\n first_name = ''\n if name in name_dict:\n first_name = name_dict[name]\n if name in family_dict:\n node_name = node_name + '.' + family_dict[name]\n last_name = family_dict[name]\n else:\n node_name = node_name + '.node' + str(hash(name))[3:6]\n if first_name + last_name:\n name = first_name + last_name\n node_name = node_name + '.' 
+ name\n return node_name\n\n\nimports_nodes = set()\nnodes = set()\n\nfor first_name in first_names:\n node = {}\n name = first_name\n if first_name in first_name_dict:\n name = first_name_dict[first_name]\n node_name = get_node_name(name)\n nodes.add(node_name)\n node['name'] = node_name\n links = []\n res = result[name]\n for it in res:\n imports = get_node_name(list(it)[0])\n imports_nodes.add(imports)\n links.append(imports)\n # links.append(k)\n node['imports'] = links\n # print(node)\n json_obj.append(node)\n\nfor imports_node in imports_nodes:\n if imports_node not in nodes:\n node = {'name': imports_node, 'imports': []}\n json_obj.append(node)\n\nwith open('similarity.json', 'w') as f:\n json.dump(json_obj, f)\n", "sub_path": "similarity.py", "file_name": "similarity.py", "file_ext": "py", "file_size_in_byte": 2990, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "gensim.models.Word2Vec", "line_number": 13, "usage_type": "call"}, {"api_name": "gensim.models", "line_number": 13, "usage_type": "attribute"}, {"api_name": "name_counter.name_dict", "line_number": 20, "usage_type": "attribute"}, {"api_name": "name_counter.names", "line_number": 21, "usage_type": "attribute"}, {"api_name": "name_counter.first_names", "line_number": 22, "usage_type": "attribute"}, {"api_name": "name_counter.first_name_dict", "line_number": 59, "usage_type": "attribute"}, {"api_name": "name_counter.family_dict", "line_number": 74, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 124, "usage_type": "call"}]} +{"seq_id": "528235531", "text": "from flask import Flask, request, jsonify\n# from flask import redirect, url_for, render_template\n# import spacy\n# import nltkGenismCleanModel\n# import nltkGenism\nimport operations\nfrom flask import send_file\n# import pandas as pd\n\n\napp = Flask(__name__)\n# nlp = spacy.load('en_core_web_md')\n# nltkGenismCleanModel.buildModel()\n# df = pd.read_excel('Helpdesk_Report_part_cleaned_final.xlsx')\n# dataoutput = df[\"Close notes\"].tolist()\n\n# @app.route('/')\n# def index():\n# return render_template('text.html')\n\n\n@app.route('/suggestions/get', methods=['post'])\ndef getSuggestions():\n data = request.get_json()\n return jsonify(operations.getSuggestions(data[\"desc\"]))\n\n\n@app.route('/upsuggestions', methods=[\"post\"])\ndef putSuggestions():\n data = request.get_json()\n operations.addVotedResult(data[\"desc\"], data[\"close_note\"])\n return jsonify(\"success\")\n\n\n@app.route('/upsuggestions/addnote', methods=[\"post\"])\ndef putNotes():\n data = request.get_json()\n note_id = operations.addNote(data[\"id\"], data[\"note\"])\n return jsonify(note_id)\n\n\n@app.route('/upsuggestions/removeticket', methods=[\"post\"])\ndef removeTicket():\n data = request.get_json()\n operations.removeTicket(data[\"id\"])\n return jsonify(\"success\")\n\n\n@app.route('/upsuggestions/updateclousurenote', methods=[\"post\"])\ndef putCloseNotes():\n data = request.get_json()\n operations.updateClosureNote(data[\"id\"], data[\"close_note\"])\n return jsonify(\"success\")\n\n\n@app.route('/upsuggestions/addticket', methods=[\"post\"])\ndef putTicket():\n data = request.get_json()\n operations.addTicket(data[\"issue\"], data[\"closureNote\"])\n return jsonify(\"success\")\n\n\n@app.route('/upsuggestions/addfile', methods=[\"post\"])\ndef putFileName():\n data = request.get_json()\n atc_id = operations.addfile(data[\"id\"], data[\"fileName\"], data[\"uuid\"])\n return 
jsonify(atc_id)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n", "sub_path": "hack.py", "file_name": "hack.py", "file_ext": "py", "file_size_in_byte": 1910, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 11, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.jsonify", "line_number": 25, "usage_type": "call"}, {"api_name": "operations.getSuggestions", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 30, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 30, "usage_type": "name"}, {"api_name": "operations.addVotedResult", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 37, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 37, "usage_type": "name"}, {"api_name": "operations.addNote", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 39, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 44, "usage_type": "name"}, {"api_name": "operations.removeTicket", "line_number": 45, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "operations.updateClosureNote", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 53, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 58, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 58, "usage_type": "name"}, {"api_name": "operations.addTicket", "line_number": 59, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "operations.addfile", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 67, "usage_type": "call"}]} +{"seq_id": "459018513", "text": "import cv2\r\nimport mediapipe as mp\r\nimport streamlit as st\r\nimport numpy as np\r\nfrom PIL import Image\r\n\r\n\r\nmp_drawing = mp.solutions.drawing_utils\r\nmp_objectron = mp.solutions.objectron\r\n\r\ndef main(image_file):\r\n with mp_objectron.Objectron(static_image_mode=True,\r\n max_num_objects=5,\r\n min_detection_confidence=0,\r\n model_name='Shoe') as objectron:\r\n image = image_file\r\n results = objectron.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))\r\n annotated_image = image.copy()\r\n for detected_object in results.detected_objects:\r\n mp_drawing.draw_landmarks(\r\n annotated_image, detected_object.landmarks_2d, mp_objectron.BOX_CONNECTIONS)\r\n mp_drawing.draw_axis(annotated_image, detected_object.rotation,\r\n detected_object.translation)\r\n return annotated_image\r\n\r\n\r\n\r\ndef app():\r\n html_temp = \"\"\"\r\n \r\n
<div>\r\n <h2>3D Objectron</h2>\r\n </div>\r\n
\r\n \r\n \"\"\"\r\n st.markdown(html_temp, unsafe_allow_html=True)\r\n\r\n\r\n image_file = st.file_uploader(\"Upload Image\", type=['jpg'])\r\n if image_file is not None:\r\n our_image = np.array(Image.open(image_file))\r\n st.text(\"DONE\")\r\n if st.button(\"Find\"): \r\n output = main(our_image)\r\n st.image(output)\r\n", "sub_path": "pages/objectron.py", "file_name": "objectron.py", "file_ext": "py", "file_size_in_byte": 1521, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "mediapipe.solutions", "line_number": 8, "usage_type": "attribute"}, {"api_name": "mediapipe.solutions", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 17, "usage_type": "attribute"}, {"api_name": "streamlit.markdown", "line_number": 36, "usage_type": "call"}, {"api_name": "streamlit.file_uploader", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 41, "usage_type": "name"}, {"api_name": "streamlit.text", "line_number": 42, "usage_type": "call"}, {"api_name": "streamlit.button", "line_number": 43, "usage_type": "call"}, {"api_name": "streamlit.image", "line_number": 45, "usage_type": "call"}]} +{"seq_id": "274614082", "text": "import configparser\nimport subprocess\n\nimport os\n\nfrom qtmanager.operators.widgets.widgets import Widgets\n\n\nclass Commands:\n \"\"\"Work with commands of a operator\"\"\"\n\n # их сделать не константами?\n COMMANDS_FILE = 'commands.ini'\n OPERATORS = [Widgets()]\n\n class Command:\n \"\"\"Describe a command\"\"\"\n\n def __init__(self, command=None, subcommand=None, data_example=None, description=None,\n shell_query=None, method=None, help=None, operator=None, argv=None) -> None:\n \"\"\"\n Initializes command\n :param command: Name command\n :type command: str\n :param subcommand: Name subcommand\n :type subcommand: str\n :param data_example: example text of input data\n :type data_example: str\n :param description: what command do\n :type description: str\n :param shell_query: code that the shell must to execute (regular expression)\n :type shell_query: r(str)\n :param method: method that should be called\n :type method: str\n :param help: error text when executing the command\n :type help: str\n :param operator: Operator whose command is executed\n :type operator: object\n :param argv: Params list\n :type argv: list\n \"\"\"\n self._command = command\n self._subcommand = subcommand\n self._data_example = data_example\n self._description = description\n self._shell_query = shell_query\n self._method = method\n self._help = help\n self._operator = operator\n self._argv = argv or []\n\n @property\n def command(self):\n return self._command\n\n @property\n def subcommand(self):\n return self._subcommand\n\n @property\n def operator(self):\n return self._operator\n\n @operator.setter\n def operator(self, value):\n self._operator = value\n\n @property\n def argv(self):\n return self._argv\n\n @argv.setter\n def argv(self, value):\n self._argv = value\n\n def __eq__(self, o) -> bool:\n if isinstance(o, Commands.Command) and self._command == o.command and \\\n self._subcommand == o.subcommand:\n return True\n\n return NotImplemented\n\n def __str__(self) -> str:\n desc = []\n\n s = '{:<20}'.format(self._command) if 
self._command else ' ' * 20\n desc.append(s)\n s = '{:<5}'.format(self._subcommand) if self._subcommand else ' ' * 5\n desc.append(s)\n s = '{:<30}'.format(self._data_example) if self._data_example else ' ' * 30\n desc.append(s)\n s = '{:<100}'.format(self._description) if self._description else ' ' * 100\n desc.append(s)\n\n return ''.join(desc)\n\n def is_correct(self):\n \"\"\"\n Checks to be Command correct.\n :return: True if is, and False if is not\n \"\"\"\n\n if self._command and self._description and self._method:\n return True\n\n return False\n\n def execute(self):\n \"\"\"calls the method defined in the command file.\"\"\"\n\n getattr(self._operator, self._method)(self)\n\n def execute_shell(self, argv):\n \"\"\"\n Execute a command code in the shell.\n :param argv: Params for the current command\n :type argv: list\n :return: 0, if done, and 1 if not done\n \"\"\"\n\n if self._shell_query:\n query = self._shell_query.format(*argv)\n code = subprocess.call(query, shell=True)\n if code:\n print(self._help)\n\n return code\n\n return 1\n\n def __init__(self) -> None:\n \"\"\"Initializes commands from read files\"\"\"\n\n self._commands = []\n\n for o in Commands.OPERATORS:\n try:\n path = os.path.join(o.root_path, self.COMMANDS_FILE)\n config = configparser.ConfigParser()\n config.read(path)\n\n for com in config:\n command = Commands.Command()\n for field in config[com]:\n _field = '_' + field\n #if config[com][field]:\n if config[com][field] and _field in command.__dict__:\n # почему без '_' он не может свой собственный создать хотя\n # в интерактивном режиме пайтона всё нормально?\n setattr(command, _field, config[com][field])\n elif _field not in command.__dict__:\n raise configparser.ParsingError(source=path)\n\n if command.is_correct():\n command.operator = o\n self._commands.append(command)\n except configparser.ParsingError as ex:\n print('File incorrect:', ex.source)\n\n def find_command(self, argv):\n \"\"\"\n Find current command\n :param argv: elements of the command in order.\n :type argv: list\n :return: Command if contains, and None if is not\n \"\"\"\n\n if len(argv) >= 2 and argv[1].startswith('-'):\n command = Commands.Command(argv[0], argv[1])\n _argv = argv[2:]\n elif len(argv) == 1 and argv[0] == 'help':\n print(*self._commands, sep='\\n')\n return None\n elif len(argv) >= 1:\n command = Commands.Command(argv[0])\n _argv = argv[1:]\n else:\n print(\"command don't have operator.\")\n return None\n\n for c in self._commands:\n if command == c:\n c.argv = _argv\n return c\n else:\n print('\"{0}\" operator not found'.format(argv[0]))\n\n return None\n", "sub_path": "qtmanager/utility/commands.py", "file_name": "commands.py", "file_ext": "py", "file_size_in_byte": 6207, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "qtmanager.operators.widgets.widgets.Widgets", "line_number": 14, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 123, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 139, "usage_type": "call"}, {"api_name": "configparser.ParsingError", "line_number": 152, "usage_type": "call"}, {"api_name": "configparser.ParsingError", "line_number": 157, "usage_type": "attribute"}]} +{"seq_id": "557522569", "text": "import requests\nfrom django.contrib.auth import login, logout\nfrom 
django.shortcuts import render, redirect\n\nfrom config.settings import ROOT_DIR, SECRETS\nfrom members.forms import SignupForm, LoginForm\nfrom members.models import User\n\n\ndef login_view(request):\n if request.method == 'POST':\n form = LoginForm(request.POST)\n if form.is_valid():\n # after is_valid(), self.cleaned_data has been returned\n form.login(request)\n return redirect('posts:post-list')\n else:\n # for a GET we only need to send the form layout, so request.POST does not have to be passed into LoginForm.\n form = LoginForm()\n\n login_base_url = 'https://nid.naver.com/oauth2.0/authorize'\n login_params = {\n 'response_type': 'code',\n 'client_id': SECRETS[\"NAVER_CLIENT_ID\"],\n 'redirect_url': 'http://localhost:8000/members/naver-login/',\n 'state': 'RANDOM_STATE',\n }\n login_url = '{base}?{params}'.format(\n base=login_base_url,\n params='&'.join([f'{key}={value}' for key, value in login_params.items()])\n )\n\n context = {\n 'form': form,\n 'login_url': login_url,\n }\n return render(request, 'members/login.html', context)\n\n\ndef signup_view(request):\n # when accessed via POST\n if request.method == 'POST':\n form = SignupForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n return redirect('posts:post-list')\n else:\n form = SignupForm()\n\n context = {\n 'form': form,\n }\n return render(request, 'members/signup.html', context)\n\n\ndef logout_view(request):\n \"\"\"\n Check whether the user is logged in and, if so, call logout().\n \"\"\"\n logout(request)\n return redirect('members:login')\n\n\ndef naver_login(request):\n # using the code value passed as a GET parameter,\n # and following the PHP sample code of the Naver API,\n # build the token_url\n # print(token_url)\n login_base_url = \"https://nid.naver.com/oauth2.0/token\"\n login_params = {\n 'grant_type': 'authorization_code',\n 'client_id': SECRETS[\"NAVER_CLIENT_ID\"],\n 'client_secret': SECRETS[\"NAVER_CLIENT_SECRET\"],\n 'redirect_url': 'http://localhost:8000/members/naver-login/',\n 'code': request.GET['code'],\n 'state': request.GET['state'],\n }\n\n # ClientID is a public value\n # Naver login button\n # ClientSecret is a private value\n\n token_url = '{base}?{params}'.format(\n base=login_base_url,\n params='&'.join([f'{key}={value}' for key, value in login_params.items()])\n )\n\n print('token url: ', token_url)\n # the reply to token_url is stored in response. 
what is stored in response is the response status\n response = requests.get(token_url)\n print(response)\n print('status: ', response.status_code)\n access_token = response.json()['access_token']\n me_header = {\n 'Authorization': f\"Bearer {access_token}\"\n }\n me_url = \"https://openapi.naver.com/v1/nid/me\"\n me_response = requests.get(me_url, headers=me_header)\n me_response_data = me_response.json()\n print(me_response_data)\n unique_id = me_response_data['response']['id']\n print(unique_id)\n\n naver_username = f'n_{unique_id}'\n if not User.objects.filter(username=naver_username).exists():\n user = User.objects.create_user(username=naver_username)\n else:\n user = User.objects.get(username=naver_username)\n login(request, user)\n return redirect('posts:post-list')\n", "sub_path": "app/members/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 3680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "members.forms.LoginForm", "line_number": 12, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "members.forms.LoginForm", "line_number": 19, "usage_type": "call"}, {"api_name": "config.settings.SECRETS", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 37, "usage_type": "call"}, {"api_name": "members.forms.SignupForm", "line_number": 43, "usage_type": "call"}, {"api_name": "django.contrib.auth.login", "line_number": 46, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 47, "usage_type": "call"}, {"api_name": "members.forms.SignupForm", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 54, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 61, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 62, "usage_type": "call"}, {"api_name": "config.settings.SECRETS", "line_number": 73, "usage_type": "name"}, {"api_name": "config.settings.SECRETS", "line_number": 74, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 91, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 99, "usage_type": "call"}, {"api_name": "members.models.User.objects.filter", "line_number": 106, "usage_type": "call"}, {"api_name": "members.models.User.objects", "line_number": 106, "usage_type": "attribute"}, {"api_name": "members.models.User", "line_number": 106, "usage_type": "name"}, {"api_name": "members.models.User.objects.create_user", "line_number": 107, "usage_type": "call"}, {"api_name": "members.models.User.objects", "line_number": 107, "usage_type": "attribute"}, {"api_name": "members.models.User", "line_number": 107, "usage_type": "name"}, {"api_name": "members.models.User.objects.get", "line_number": 109, "usage_type": "call"}, {"api_name": "members.models.User.objects", "line_number": 109, "usage_type": "attribute"}, {"api_name": "members.models.User", "line_number": 109, "usage_type": "name"}, {"api_name": "django.contrib.auth.login", "line_number": 110, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "103098302", "text": "###################################################################\n# File Name: capture_rate.py\n# Author: yaomingyue\n# mail: yaomingyue@fuanhua.com\n# Created Time: 2017-10-12 Thursday 
09:52:13\n#=============================================================\n#!/usr/bin/env python\n#-*- coding:utf8 -*-\nimport sys\nimport argparse\n\ndef get_capture_region(bed_input):\n\tdic={}\n\tfor line in bed_input:\n\t\tif line.startswith(\"chr\"):\n\t\t\ttmp=line.strip().split()\n\t\t\tif tmp[0] not in dic:\n\t\t\t\tdic[tmp[0]]={}\n\t\t\tfor i in range(int(tmp[1]),int(tmp[2])+1):\n\t\t\t\tdic[tmp[0]][i]=\"\"\n\treturn dic\n\ndef get_capture_rate(bed_input,depth_input):\n\ttarget_base=0\n\tmapping_base=0\n\ttarget=get_capture_region(bed_input)\n\tfor line in depth_input:\n\t\ttmps=line.strip().split()\n\t\tmapping_base+=int(tmps[2])\n\t\tif tmps[0] in target and int(tmps[1]) in target[tmps[0]]:\n\t\t\ttarget_base+=int(tmps[2])\n\t\t\tprint (line.strip())\n\trate=target_base/mapping_base\n\treturn [target_base,mapping_base,rate]\n\n\n\ndef main():\n\tparser=argparse.ArgumentParser(description=__doc__)\n\tparser.add_argument('-i','--input',help='Depth which used samtools',dest='depth_input',required=True,type=open)\n\tparser.add_argument('-t',help='the bed of exon chip',dest='bed_input',required=True,type=open)\n\tparser.add_argument('-o','--output',help='the table of results of capture rate',dest=\"output\",required=True,type=argparse.FileType('w'))\n\targs=parser.parse_args()\n\tresult=get_capture_rate(args.bed_input,args.depth_input)\n\targs.output.write(\"target_base\\t\"+str(result[0])+\"\\n\")\n\targs.output.write(\"mapping_base\\t\"+str(result[1])+\"\\n\")\n\targs.output.write(\"capture_rate\\t\"+str(result[2])+\"\\n\")\n\n\n\nif __name__==\"__main__\":\n\tmain()\n\n\n", "sub_path": "WGS/bin/capture_rate_v2.py", "file_name": "capture_rate_v2.py", "file_ext": "py", "file_size_in_byte": 1680, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 39, "usage_type": "call"}, {"api_name": "argparse.FileType", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "479819189", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport os.path\nimport time\n\nimport numpy as np\nfrom six.moves import xrange\nimport tensorflow as tf\n\nfrom tensorflow.models.image.cifar10 import cifar10\n\nFLAGS = tf.app.flags.FLAGS\ntf.app.flags.DEFINE_string('train_dir', 'cifar10_train', 'Directory where to write event logs and checkpoint')\ntf.app.flags.DEFINE_integer('max_steps', 1000000, 'number of batches to run')\ntf.app.flags.DEFINE_boolean('log_device_placement', False, 'whether to log device placement')\n\ndef train():\n with tf.Graph().as_default():\n global_step = tf.Variable(0, trainable=False)\n # read the cifar10 data\n images, labels = cifar10.distorted_inputs()\n\n logits = cifar10.inference(images)\n loss = cifar10.loss(logits, labels)\n train_op = cifar10.train(loss, global_step)\n saver = tf.train.Saver(tf.all_variables())\n summary_op = tf.merge_all_summaries()\n init = tf.initialize_all_variables()\n sess = tf.Session(\n config=tf.ConfigProto(\n log_device_placement=FLAGS.log_device_placement\n )\n )\n tf.train.start_queue_runners(sess=sess)\n summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)\n \n for step in xrange(FLAGS.max_steps):\n start_time = time.time()\n _, loss_value = sess.run([train_op, loss])\n duration = time.time() - start_time\n \n assert not np.isnan(loss_value), 'Model diverged with loss = NaN'\n \n if step % 10 == 0:\n num_examples_per_step = 
FLAGS.batch_size\n examples_per_sec = num_examples_per_step / duration\n sec_per_batch = float(duration)\n format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)')\n print(format_str % (datetime.now(), step, loss_value, examples_per_sec, sec_per_batch))\n\n if step % 100 == 0:\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n \n if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:\n checkpoint_path = os.path.join(FLAGS.train_dir,'model.ckpt')\n saver.save(sess, checkpoint_path, global_step=step)\n ", "sub_path": "tensorflow库的学习/cifar10_train.py", "file_name": "cifar10_train.py", "file_ext": "py", "file_size_in_byte": 2342, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tensorflow.app", "line_number": 15, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_string", "line_number": 16, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 16, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_integer", "line_number": 17, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 17, "usage_type": "attribute"}, {"api_name": "tensorflow.app.flags.DEFINE_boolean", "line_number": 18, "usage_type": "call"}, {"api_name": "tensorflow.app", "line_number": 18, "usage_type": "attribute"}, {"api_name": "tensorflow.Graph", "line_number": 21, "usage_type": "call"}, {"api_name": "tensorflow.Variable", "line_number": 22, "usage_type": "call"}, {"api_name": "tensorflow.models.image.cifar10.cifar10.distorted_inputs", "line_number": 24, "usage_type": "call"}, {"api_name": "tensorflow.models.image.cifar10.cifar10", "line_number": 24, "usage_type": "name"}, {"api_name": "tensorflow.models.image.cifar10.cifar10.inference", "line_number": 26, "usage_type": "call"}, {"api_name": "tensorflow.models.image.cifar10.cifar10", "line_number": 26, "usage_type": "name"}, {"api_name": "tensorflow.models.image.cifar10.cifar10.loss", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.models.image.cifar10.cifar10", "line_number": 27, "usage_type": "name"}, {"api_name": "tensorflow.models.image.cifar10.cifar10.train", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.models.image.cifar10.cifar10", "line_number": 28, "usage_type": "name"}, {"api_name": "tensorflow.train.Saver", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.all_variables", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.merge_all_summaries", "line_number": 30, "usage_type": "call"}, {"api_name": "tensorflow.initialize_all_variables", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 33, "usage_type": "call"}, {"api_name": "tensorflow.train.start_queue_runners", "line_number": 37, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 37, "usage_type": "attribute"}, {"api_name": "tensorflow.train.SummaryWriter", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.train", "line_number": 38, "usage_type": "attribute"}, {"api_name": "six.moves.xrange", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 45, 
"usage_type": "call"}, {"api_name": "datatime.datetime.now", "line_number": 52, "usage_type": "call"}, {"api_name": "datatime.datetime", "line_number": 52, "usage_type": "name"}, {"api_name": "os.path.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "436609992", "text": "from six import iteritems\nimport json\n\n\ndef convert_to_psp_schema(schema):\n d = {}\n for k, v in iteritems(schema):\n if 'float' in v:\n d[k] = 'float'\n elif 'int' in v:\n d[k] = 'integer'\n elif 'bool' in v:\n d[k] = 'boolean'\n elif ':' in v or '-' in v or 'date' in v or 'time' in v:\n d[k] = 'date'\n elif 'str' in v or 'string' in v:\n d[k] = 'string'\n else:\n d[k] = 'string'\n return d\n\n\ndef validate_schema(schema):\n return convert_to_psp_schema(schema)\n\n\ndef schema(data, typ):\n schema = validate_schema(data, typ)\n return json.dumps(schema)\n", "sub_path": "perspective/core/schema.py", "file_name": "schema.py", "file_ext": "py", "file_size_in_byte": 665, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "six.iteritems", "line_number": 7, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "311121799", "text": "import html\r\nimport re\r\nimport requests\r\nimport time\r\nimport uptime\r\n\r\nfrom Crypto.Cipher import DES\r\n\r\nclass BeanfunClient:\r\n class Account:\r\n def __init__(self, acc, sotp, name, decode=True):\r\n self.acc = acc\r\n self.sotp = sotp\r\n self.name = html.unescape(name) if decode else name\r\n self.secreate_time = None\r\n\r\n def __str__(self):\r\n return self.acc + ', ' + self.sotp + ', ' + self.name\r\n\r\n def __init__(self, service_code='610074', service_region='T9'):\r\n self.session = requests.session()\r\n self.service_code = service_code\r\n self.service_region = service_region\r\n self.headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'\r\n }\r\n\r\n def get_skey(self):\r\n result = self.session.get('https://tw.beanfun.com/beanfun_block/bflogin/default.aspx?service_code=999999&service_region=T0', headers=self.headers)\r\n skey_pattern = r'strSessionKey = \"(.*?)\"'\r\n skey = re.findall(skey_pattern, result.text)[0]\r\n return skey\r\n\r\n def login(self, username, password):\r\n print('[Info] Logging in ...')\r\n skey = self.get_skey()\r\n login_page_url = 'https://tw.newlogin.beanfun.com/login/id-pass_form.aspx?skey=' + skey\r\n login_page = self.session.get(login_page_url, headers=self.headers)\r\n\r\n viewstate_pattern = r'id=\"__VIEWSTATE\" value=\"(.*?)\"'\r\n viewstate = re.findall(viewstate_pattern, login_page.text)[0]\r\n \r\n eventvalidation_pattern = r'id=\"__EVENTVALIDATION\" value=\"(.*?)\"'\r\n eventvalidation = re.findall(eventvalidation_pattern, login_page.text)[0]\r\n\r\n viewstateGenerator_pattern = r'id=\"__VIEWSTATEGENERATOR\" value=\"(.*?)\"'\r\n viewstateGenerator = re.findall(viewstateGenerator_pattern, login_page.text)[0]\r\n \r\n samplecaptcha_pattern = r'id=\"LBD_VCID_c_login_idpass_form_samplecaptcha\" value=\"(.*?)\"'\r\n samplecaptcha = re.findall(samplecaptcha_pattern, login_page.text)[0]\r\n\r\n login_data = {\r\n '__EVENTTARGET': '',\r\n '__EVENTARGUMENT': '',\r\n '__VIEWSTATE': viewstate,\r\n '__VIEWSTATEGENERATOR': viewstateGenerator,\r\n '__EVENTVALIDATION': 
eventvalidation,\r\n 't_AccountID': username,\r\n 't_Password': password,\r\n 'CodeTextBox': '',\r\n 'btn_login': '登入',\r\n 'LBD_VCID_c_login_idpass_form_samplecaptcha': samplecaptcha\r\n }\r\n\r\n login_result = self.session.post(login_page_url, data=login_data, headers=self.headers)\r\n akey_pattern = r'AuthKey.value = \"(.*?)\"'\r\n akey = re.findall(akey_pattern, login_result.text)[0]\r\n \r\n auth_url = 'https://tw.newlogin.beanfun.com/login/final_step.aspx?akey=' + akey\r\n auth_result = self.session.get(auth_url, headers=self.headers)\r\n\r\n data = {\r\n 'SessionKey': skey,\r\n 'AuthKey': akey\r\n }\r\n url = 'https://tw.beanfun.com/beanfun_block/bflogin/return.aspx'\r\n result = self.session.post(url, headers=self.headers, data=data)\r\n\r\n self.session.get('https://tw.beanfun.com', headers=self.headers)\r\n self.web_token = self.session.cookies['bfWebToken']\r\n\r\n def get_accounts(self):\r\n game_zone_url = 'https://tw.beanfun.com/beanfun_block/auth.aspx?channel=game_zone&page_and_query=game_start.aspx%3Fservice_code_and_region%3D{}_{}&web_token={}'\r\n game_zone_url = game_zone_url.format(self.service_code, self.service_region, self.web_token)\r\n account_result = self.session.get(game_zone_url, headers=self.headers)\r\n \r\n account_pattern = r'div id=\"(.*?)\" sn=\"(.*?)\" name=\"(.*?)\"'\r\n account_list = re.findall(account_pattern, account_result.text)\r\n\r\n self.accounts = []\r\n for account in account_list:\r\n if '+' in account[0]:\r\n continue\r\n self.accounts.append(BeanfunClient.Account(*account))\r\n return self.accounts\r\n\r\n def show_accounts(self):\r\n print(\"[Info] Account List:\")\r\n for account in self.accounts:\r\n print(\"\\t - \" + str(account))\r\n print(\"\")\r\n\r\n def get_account_otp(self, account):\r\n current_time = time.strftime(\"%Y%m%d%H%M%S\", time.localtime())\r\n game_start_url = 'https://tw.beanfun.com/beanfun_block/game_zone/game_start_step2.aspx?service_code={}&service_region={}&sotp={}&dt={}'\r\n game_start_url = game_start_url.format(self.service_code, self.service_region, account.sotp, current_time)\r\n game_start_result = self.session.get(game_start_url, headers=self.headers)\r\n\r\n long_polling_key_pattern = r'GetResultByLongPolling&key=(.*?)\"'\r\n long_polling_key = re.findall(long_polling_key_pattern, game_start_result.text)[0]\r\n\r\n if not account.secreate_time:\r\n secreate_time_pattern = r'ServiceAccountCreateTime: \"(.*?)\"'\r\n account.secreate_time = re.findall(secreate_time_pattern, game_start_result.text)[0]\r\n\r\n secretCode_url = 'https://tw.newlogin.beanfun.com/generic_handlers/get_cookies.ashx'\r\n secretCode_result = self.session.get(secretCode_url)\r\n\r\n secretCode_pattern = r'var m_strSecretCode = \\'(.*)\\';'\r\n secretCode = re.findall(secretCode_pattern, secretCode_result.text)[0]\r\n\r\n otp_data = {\r\n 'service_code': self.service_code,\r\n 'service_region': self.service_region,\r\n 'service_account_id': account.acc,\r\n 'service_sotp': account.sotp,\r\n 'service_display_name': account.name,\r\n 'service_create_time': account.secreate_time\r\n }\r\n otp_url = 'https://tw.beanfun.com/beanfun_block/generic_handlers/record_service_start.ashx'\r\n otp_result = self.session.post(otp_url, data=otp_data)\r\n otp_url = 'https://tw.beanfun.com/generic_handlers/get_result.ashx?meth=GetResultByLongPolling&key=' + long_polling_key\r\n otp_result = self.session.get(otp_url)\r\n otp_url = 'http://tw.beanfun.com/beanfun_block/generic_handlers/get_webstart_otp.ashx?SN={}&WebToken={}&SecretCode={}' \\\r\n + 
'&ppppp=1F552AEAFF976018F942B13690C990F60ED01510DDF89165F1658CCE7BC21DBA&ServiceCode={}' \\\r\n + '&ServiceRegion={}&ServiceAccount={}&CreateTime={}&d={}'\r\n\r\n otp_url = otp_url.format(long_polling_key, self.web_token, secretCode, self.service_code, self.service_region,\r\n account.acc,account.secreate_time.replace(' ', '%20'), int(uptime.uptime() * 1000))\r\n otp_result = self.session.get(otp_url)\r\n\r\n print('[Info] OTP Result: ' + otp_result.text + '\\t' + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\r\n status = otp_result.text[0]\r\n if status != '1':\r\n print('[Error] Need to restart the program.')\r\n return -1\r\n\r\n key = otp_result.text[2: 10]\r\n text = otp_result.text[10:]\r\n otp = self.decrypt_des(key, text)\r\n return otp\r\n\r\n def decrypt_des(self, key, text):\r\n bytes_key = key.encode('ascii')\r\n bytes_text = bytes.fromhex(text)\r\n des = DES.new(bytes_key, DES.MODE_ECB)\r\n decrypted_text = des.decrypt(bytes_text)[:10].decode()\r\n return decrypted_text", "sub_path": "beanfun.py", "file_name": "beanfun.py", "file_ext": "py", "file_size_in_byte": 7346, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "html.unescape", "line_number": 14, "usage_type": "call"}, {"api_name": "requests.session", "line_number": 21, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 31, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 41, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 44, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 47, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 50, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 67, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 88, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 104, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 104, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 110, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 114, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 120, "usage_type": "call"}, {"api_name": "uptime.uptime", "line_number": 139, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 142, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 142, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES.new", "line_number": 156, "usage_type": "call"}, {"api_name": "Crypto.Cipher.DES", "line_number": 156, "usage_type": "name"}, {"api_name": "Crypto.Cipher.DES.MODE_ECB", "line_number": 156, "usage_type": "attribute"}]} +{"seq_id": "482066842", "text": "import os\nimport tempfile\nimport subprocess\nimport operator\nimport ftrack\n\n\ndef GetAssetNameById(id):\n for t in ftrack.getAssetTypes():\n try:\n if t.get('typeid') == id:\n return t.get('name')\n except:\n return None\n\ndef GetTaskTypeByName(name):\n task_types = ftrack.getTaskTypes()\n\n result = None\n for s in task_types:\n if s.getName().lower() == name.lower():\n result = s\n\n return result\n\ndef GetStatusByName(name):\n statuses = ftrack.getTaskStatuses()\n\n result = None\n for s in statuses:\n if s.get('name').lower() == name.lower():\n result = s\n\n return result\n\n\ndef GetNextTask(task):\n shot = task.getParent()\n tasks = shot.getTasks()\n\n def sort_types(types):\n data = {}\n for t in types:\n data[t] = t.get('sort')\n\n data = sorted(data.items(), key=operator.itemgetter(1))\n results = []\n for item in 
data:\n results.append(item[0])\n\n return results\n\n types_sorted = sort_types(ftrack.getTaskTypes())\n\n next_types = None\n for t in types_sorted:\n if t.get('typeid') == task.get('typeid'):\n try:\n next_types = types_sorted[(types_sorted.index(t) + 1):]\n except:\n pass\n\n for nt in next_types:\n for t in tasks:\n if nt.get('typeid') == t.get('typeid'):\n return t\n\n return None\n\ndef getLatestVersion(versions):\n latestVersion = None\n if len(versions) > 0:\n versionNumber = 0\n for item in versions:\n if item.get('version') > versionNumber:\n versionNumber = item.getVersion()\n latestVersion = item\n return latestVersion\n\ndef getShots(entity):\n result = []\n\n if entity.get('objecttypename') == 'Task':\n for parent in entity.getParents():\n try:\n if parent.get('objecttypename') == 'Shot':\n result.append(parent)\n except:\n pass\n\n if entity.get('objecttypename') == 'Shot':\n result.append(entity)\n\n if entity.get('objecttypename') == 'Sequence':\n for shot in entity.getShots():\n result.extend(getShots(shot))\n\n if entity.get('objecttypename') == 'Episode':\n for seq in entity.getSequences():\n result.extend(getShots(seq))\n\n return result\n\ndef submitQT(version):\n component = version.getComponent('main')\n filePath = component.getFile()\n settingsPath = r'K:\\ftrack\\ftrack.git\\quicktime_export_settings.xml'\n settingsPath =settingsPath.replace('\\\\', '/')\n\n #frames\n versionPath = os.path.dirname(filePath)\n files = os.listdir(versionPath)\n files = sorted(files, key = lambda x: x.split('_')[-1].split('.')[0])\n firstFrame = files[0].split('_')[-1].split('.')[0]\n lastFrame = files[-1].split('_')[-1].split('.')[0]\n\n #outputdir\n outputDir = os.path.dirname(os.path.dirname(filePath))\n\n #outputfilename\n outputFilename = os.path.basename(filePath)\n outputFilename = '_'.join(outputFilename.split('_')[0:-1]) + '.mov'\n\n #inputimages\n inputimages = os.path.join(versionPath, files[0])\n\n #outputfile\n outputfile = os.path.join(outputDir, outputFilename).replace('\\\\', '/')\n\n #audiofile\n pathList = os.path.dirname(filePath.replace('/', '\\\\')).split(os.sep)\n pathList[2] = 'episodes'\n pathList.insert(1, os.sep)\n path = os.path.join(*pathList)\n\n pathList.insert(-2, 'audio')\n pathList[-2] = '_'.join(outputFilename.split('_')[0:-1]) + '.wav'\n filePath = os.path.join(*pathList[0:-1])\n\n audiofile = None\n if os.path.exists(filePath):\n audiofile = filePath\n\n #get temp directory\n tempDir=tempfile.gettempdir()\n\n #generate plugin file\n jobData = 'Plugin=Quicktime\\nPriority=50\\nPool=medium\\nChunkSize=100000\\n'\n jobData += 'Comment=Ftrack submit\\n'\n jobData += 'Name=%s\\n' % outputFilename.replace('.mov', '')\n jobData += 'Frames=%s-%s\\n' % (int(firstFrame), int(lastFrame))\n jobData += 'OutputFilename0=%s\\n' % outputfile\n\n jobFile=open((tempDir+'/job_info.job'),'w')\n jobFile.write(jobData)\n jobFile.close()\n jobFile=(tempDir+'/job_info.job')\n jobFile=jobFile.replace('\\\\','/')\n\n #generate submit file\n pluginData = 'FrameRate=25.0\\nCodec=QuickTime Movie\\n'\n pluginData += 'InputImages=%s\\n' % inputimages.replace('\\\\', '/')\n pluginData += 'OutputFile=%s\\n' % outputfile\n if audiofile:\n pluginData += 'AudioFile=%s\\n' % audiofile.replace('\\\\', '/')\n\n pluginFile=open((tempDir+'/plugin_info.job'),'w')\n pluginFile.write(pluginData)\n pluginFile.close()\n pluginFile=(tempDir+'/plugin_info.job')\n pluginFile=pluginFile.replace('\\\\','/')\n\n #submitting to Deadline\n deadlineCommand = 'C:/Program 
Files/Thinkbox/Deadline6/bin/deadlinecommand.exe'\n\n if not os.path.exists(path):\n deadlineCommand = 'C:/Program Files/Thinkbox/Deadline7/bin/deadlinecommand.exe'\n\n result = subprocess.Popen((deadlineCommand,jobFile,pluginFile,settingsPath),\n stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT,shell=False)\n\n #create movie if none exists, or delete old and create new\n try:\n version.createComponent(name='movie', path=outputfile)\n except:\n movComponent = version.getComponent(name='movie')\n movComponent.delete()\n version.createComponent(name='movie', path=outputfile)\n\ndef getThumbnailRecursive(task):\n if task.get('thumbid'):\n thumbid = task.get('thumbid')\n return ftrack.Attachment(id=thumbid)\n if not task.get('thumbid'):\n parent = ftrack.Task(id=task.get('parent_id'))\n return getThumbnailRecursive(parent)\n\ndef getTasksRecursive(entity):\n result = []\n\n if entity.get('objecttypename') == 'Task':\n result.append(entity)\n\n if entity.get('objecttypename') == 'Shot':\n for task in entity.getTasks():\n\n result.append(task)\n\n if entity.get('objecttypename') == 'Sequence':\n for shot in entity.getShots():\n result.extend(getTasksRecursive(shot))\n\n if entity.get('objecttypename') == 'Episode':\n for seq in entity.getSequences():\n result.extend(getTasksRecursive(seq))\n\n return result\n\ndef getGlobExpression(filename):\n\n _kVersionRegex = \"([/._]v)(\\\\d+)\"\n _kPaddedSequenceRegex = \"%((\\\\d)*)(d)\"\n\n # Replace version indices\n matches = [match for match in re.finditer(_kVersionRegex, filename, re.IGNORECASE)]\n if len(matches) > 0:\n\n # Obtain version index from the last version string, ignore the others\n match = matches[-1]\n\n # Replace sequence padding.\n matches = [match for match in re.finditer(_kPaddedSequenceRegex, filename, re.IGNORECASE)]\n if len(matches) > 0:\n # Iterate through matches, if the version string equals versionIndex (\"active one\"), substitute\n # NB: Reverse iteration guarantees safety of modifying filename by splitting at given positions (match.start() / end())\n for match in matches:\n pre = filename[:match.start() - 1] # -1 is to remove possibly leading '.' 
or similar before sequence padding\n post = filename[match.end():]\n filename = pre + \"*\" + post\n\n return filename\n", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 7403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "ftrack.getAssetTypes", "line_number": 9, "usage_type": "call"}, {"api_name": "ftrack.getTaskTypes", "line_number": 17, "usage_type": "call"}, {"api_name": "ftrack.getTaskStatuses", "line_number": 27, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 46, "usage_type": "call"}, {"api_name": "ftrack.getTaskTypes", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 111, "usage_type": "call"}, {"api_name": "os.path", "line_number": 111, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 112, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 118, "usage_type": "call"}, {"api_name": "os.path", "line_number": 118, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 125, "usage_type": "call"}, {"api_name": "os.path", "line_number": 125, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 131, "usage_type": "attribute"}, {"api_name": "os.sep", "line_number": 133, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 134, "usage_type": "call"}, {"api_name": "os.path", "line_number": 134, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 138, "usage_type": "call"}, {"api_name": "os.path", "line_number": 138, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 141, "usage_type": "call"}, {"api_name": "os.path", "line_number": 141, "usage_type": "attribute"}, {"api_name": "tempfile.gettempdir", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": "subprocess.Popen", "line_number": 179, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 180, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 181, "usage_type": "attribute"}, {"api_name": "ftrack.Attachment", "line_number": 194, "usage_type": "call"}, {"api_name": "ftrack.Task", "line_number": 196, "usage_type": "call"}]} +{"seq_id": "208131467", "text": "from django.shortcuts import render,redirect\nfrom django.contrib.auth.models import User\n\n#importing made form from django github\nfrom django.contrib.auth.forms import UserCreationForm\n# Create your views here.\nfrom django.contrib.auth.decorators import login_required\n\ndef home(request):\n\tcount = User.objects.count()\n\treturn render(request,'home.html',{\n\t\t'count':count\n\t\t})\n\ndef signup(request):\n\tif request.method == 'POST':\n\t\tform = UserCreationForm(request.POST)\n\t\tif form.is_valid():\n\t\t\tform.save()\t\n\t\t\treturn redirect('home')\n\telse:\n\t\tform = UserCreationForm()\n\treturn 
render(request,'registration/signup.html',{\n\t\t'form':form\n\t\t})\n\n#to make sure that secret can't be accessed without logging in first\n@login_required\ndef secret(request):\n\treturn render(request,'secret.html')\n", "sub_path": "Webscrawler Django/mysite/core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 785, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 10, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 10, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 17, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.forms.UserCreationForm", "line_number": 22, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 30, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "604796796", "text": "from django.conf import settings\nfrom django.shortcuts import redirect\nfrom django.views.generic import FormView\n\nfrom saleboxdjango.lib.checkout import SaleboxCheckout\n\n\nclass SaleboxCheckoutBaseView(FormView):\n checkout_step = None\n\n def get_conf(self, name, default):\n return settings.SALEBOX['CHECKOUT'].get(name, default)\n\n def dispatch(self, request, *args, **kwargs):\n # friendlier error messages\n if self.checkout_step is None:\n raise Exception('You need to define a checkout_step')\n if self.form_class is None:\n raise Exception('You need to define a form_class')\n\n # get defaults\n user_must_be_authenticated = self.get_conf(\n 'CHECKOUT_USER_MUST_BE_AUTHENTICATED',\n True\n )\n user_not_authenticated_redirect = self.get_conf(\n 'CHECKOUT_USER_NOT_AUTHENTICATED_REDIRECT',\n '/'\n )\n\n # check logged in\n if user_must_be_authenticated and not request.user.is_authenticated:\n return redirect(user_not_authenticated_redirect)\n\n # get checkout object\n self.sc = SaleboxCheckout(request)\n r = self.sc.page_redirect(self.checkout_step)\n if r is not None:\n return redirect(r)\n\n # store the request in the object\n self.request = request\n\n # default dispatch action\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n valid = self.form_valid_pre_redirect(form)\n if not valid:\n return self.form_invalid(form)\n\n # set as complete and redirect to the next step\n r = self.sc.set_completed(self.checkout_step, self.request)\n if r is None:\n raise Exception('There is no next checkout step to redirect to...')\n else:\n return redirect(r)\n\n def form_valid_pre_redirect(self, form):\n # add you own code here\n # self.request is available\n # return True if form handled correctly\n # return False to re-show the page as form_invalid()\n #\n return True\n\n def get_additional_context_data(self, context):\n # add your custom code here, e.g...\n # context['foo'] = 'bar'\n # ...then just return it\n # self.request is available\n #\n return context\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n data = 
self.sc.get_raw_data()\n\n total_price = data['basket']['sale_price']\n if data['shipping_method']['id'] is not None:\n total_price += data['shipping_method']['price']\n\n context['checkout'] = {\n 'data': data,\n 'nav': self.sc.get_checkout_nav(self.checkout_step),\n 'step': self.checkout_step,\n 'total_price': total_price\n }\n return self.get_additional_context_data(context)\n", "sub_path": "saleboxdjango/views/checkout/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 2921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.views.generic.FormView", "line_number": 8, "usage_type": "name"}, {"api_name": "django.conf.settings.SALEBOX", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 33, "usage_type": "call"}, {"api_name": "saleboxdjango.lib.checkout.SaleboxCheckout", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 39, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 57, "usage_type": "call"}]} +{"seq_id": "595825646", "text": "#! /usr/bin/env python3\n#-*- coding:utf-8 -*-\n#\nimport poplib\n\nfrom email.parser import Parser\nfrom email.header import decode_header\nfrom email.utils import parseaddr\n\n\n\n\n\n#邮件地址 密码和服务器地址\nemail='1369058574@qq.com'\npassword='pnywzdormtoobaae'\npop3_server='pop.qq.com'\n\n#连接到pop3服务器\nserver=poplib.POP3_SSL(pop3_server,995)\n#打开调试信息\nserver.set_debuglevel(1)\n#打印服务器端的欢迎信息\nprint(server.getwelcome().decode('utf-8'))\n\n#身份认证\nserver.user(email)\nserver.pass_('pnywzdormtoobaae')\n\n#stat()返回邮件数量和占用空间\nprint('Message:%s . 
\\n Size:%s '%server.stat())\n#list() returns all message numbers\nresp,mails,octets=server.list()\n#inspect the returned list\nprint(mails)\n\n#fetch the latest message; index numbers start at 1\nindex=len(mails)\nresp,lines,octets=server.retr(index)\n\n#lines holds each line of the message's raw text,\n#so the whole raw message can be reassembled\n\nmsg_content=b'\\r\\n'.join(lines).decode('utf-8')\n#then parse out the message:\nmsg=Parser().parsestr(msg_content)\n\n\n#a message can be deleted from the server directly by its index number\n#e.g. server.dele(index)\nserver.quit()\n\ndef decode_str(s):\n\tvalue,charset=decode_header(s)[0]\n\tif charset:\n\t\tvalue=value.decode(charset)\n\treturn value\n\n\ndef guess_charset(msg):\n\tcharset=msg.get_charset()\n\tif charset is None:\n\t\tcontent_type=msg.get('Content-Type','').lower()\n\t\tpos=content_type.find('charset=')\n\t\tif pos>=0:\n\t\t\tcharset=content_type[pos+8:].strip()\n\n\treturn charset\n\n\n\n\n#indent controls the indentation of the printed output\ndef print_info(msg,indent=0):\n\tif indent==0:\n\t\tfor header in ['From','To','Subject']:\n\t\t\tvalue=msg.get(header,'')\n\t\t\tif value:\n\t\t\t\tif header=='Subject':\n\t\t\t\t\tvalue=decode_str(value)\n\t\t\t\telse:\n\t\t\t\t\thdr,addr=parseaddr(value)\n\t\t\t\t\tname=decode_str(hdr)\n\t\t\t\t\tvalue=u'%s <%s>'%(name,addr)\n\n\t\t\tprint('%s %s : %s'%(' '*indent,header,value))\n\n\tif (msg.is_multipart()):\n\t\tparts=msg.get_payload()\n\t\tfor n,part in enumerate(parts):\n\t\t\tprint('%spart %s '%(' '*indent,n))\n\t\t\tprint('%s-------------------'%(' '*indent))\n\t\t\tprint_info(part,indent+1)\n\n\telse:\n\t\tcontent_type=msg.get_content_type()\n\t\tif content_type=='text/plain' or content_type=='text/html':\n\t\t\tcontent=msg.get_payload(decode=True)\n\t\t\tcharset=guess_charset(msg)\n\t\t\tif charset:\n\t\t\t\tcontent=content.decode(charset)\n\n\t\t\tprint('%sText: %s'%(' '*indent,content+'...'))\n\t\telse:\n\t\t\tprint('%sAttachment: %s'%(' '*indent,content_type))\n\n\n\n\n\n\n\n\n", "sub_path": "pop3.py", "file_name": "pop3.py", "file_ext": "py", "file_size_in_byte": 2411, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "email.parser", "line_number": 15, "usage_type": "name"}, {"api_name": "poplib.POP3_SSL", "line_number": 20, "usage_type": "call"}, {"api_name": "email.parser", "line_number": 27, "usage_type": "argument"}, {"api_name": "email.parser.Parser", "line_number": 46, "usage_type": "call"}, {"api_name": "email.header.decode_header", "line_number": 54, "usage_type": "call"}, {"api_name": "email.utils.parseaddr", "line_number": 82, "usage_type": "call"}]} +{"seq_id": "29728059", "text": "import pyautogui\nimport time\nimport os\nimport tkinter\nimport threading\nimport random\nimport datetime\nimport winsound\nimport sys\n\n\ndef Program():\n global stepCount, now, playAgain, needBreak, logMessage, fightChar, troopChar, battles_done, battles_done_text, play_again_color, play_again_x, play_again_y, start_color, start_x, start_y\n # pyautogui.PAUSE = 1\n pyautogui.FAILSAFE = True\n\n print('Press Ctrl-C to quit.')\n\n #play_again_x, play_again_y = pyautogui.position()\n #im = pyautogui.screenshot()\n #if play_again_x < 1920:\n # play_again_color = im.getpixel((play_again_x, play_again_y))\n\n #start_x, start_y = pyautogui.position()\n #if start_x < 1920:\n # start_color = im.getpixel((start_x, start_y))\n\n settingsFile = open('ews.txt').read()\n #print(settingsFile.readlines()[1])\n lines = settingsFile.split('.')\n play_again_x = int(lines[0])\n play_again_y = int(lines[1])\n old_pac = lines[2]\n pac = old_pac.split('(')\n pac = pac[1].split(')')\n pac = pac[0].split(',')\n play_again_color = (int(pac[0]), int(pac[1]), 
int(pac[2]))\n #play_again_color = lines[2]\n start_x = int(lines[3])\n start_y = int(lines[4])\n old_sc = lines[5]\n sc = old_sc.split('(')\n sc = sc[1].split(')')\n sc = sc[0].split(',')\n start_color = (int(sc[0]), int(sc[1]), int(sc[2]))\n close_x = play_again_x-154\n close_y = play_again_y\n play_x = play_again_x-118\n play_y = play_again_y-32\n missions_x = play_again_x-431\n mission1_y = play_again_y-208\n mission2_y = play_again_y-174\n mission3_y = play_again_y-130\n mission4_y = play_again_y-92\n mission5_y = play_again_y-67\n missionF_y = play_again_y-20\n prev_chapter_x = play_again_x-519\n prev_chapter_y = play_again_y-130\n next_chapter_x = play_again_x+219\n next_chapter_y = play_again_y-135\n #start_color = lines[5]\n print(play_again_color)\n\n\n def async_play_again():\n thread = threading.Thread(target=snap_play_again)\n #thread.daemon = True\n thread.start()\n\n def async_start():\n thread = threading.Thread(target=snap_start)\n #thread.daemon = True\n thread.start()\n\n def snap_play_again():\n global play_again_color, play_again_x, play_again_y, im\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking play again in 5\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking play again in 4\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking play again in 3\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking play again in 2\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking play again in 1\\n\")\n time.sleep(1)\n play_again_x2, play_again_y2 = pyautogui.position()\n im = pyautogui.screenshot()\n play_again_color2 = im.getpixel((play_again_x2, play_again_y2))\n s = open(\"ews.txt\").read()\n s = s.replace(str(old_pac), str(play_again_color2))\n s = s.replace(str(play_again_x), str(play_again_x2))\n s = s.replace(str(play_again_y), str(play_again_y2))\n f = open(\"ews.txt\", 'w')\n f.write(s)\n f.close()\n play_again_color = play_again_color2\n play_again_x = play_again_x2\n play_again_y = play_again_y2\n print(play_again_color)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Done\\n\")\n\n def snap_start():\n global start_color, start_x, start_y, im\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking start in 5\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking start in 4\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking start in 3\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking start in 2\\n\")\n time.sleep(1)\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Picking start in 1\\n\")\n time.sleep(1)\n start_x2, start_y2 = pyautogui.position()\n im = pyautogui.screenshot()\n start_color2 = im.getpixel((start_x2, start_y2))\n s = open(\"ews.txt\").read()\n s = s.replace(str(old_sc), str(start_color2))\n s = s.replace(str(start_x), str(start_x2))\n s = s.replace(str(start_y), str(start_y2))\n f = open(\"ews.txt\", 'w')\n f.write(s)\n f.close()\n start_x = start_x2\n start_y = start_y2\n start_color = start_color2\n text.insert('1.0', datetime.datetime.now().strftime(\"%H:%M:%S\") + \"Done\\n\")\n\n\n def color_loc():\n global im\n while True:\n im = pyautogui.screenshot()\n # Get and print the mouse coordinates.\n x, y = 
pyautogui.position()\n if x < 1920:\n position_str = 'X: ' + str(x).rjust(4) + ' Y: ' + str(y).rjust(4) + ' Colors : ' + str(im.getpixel((x, y)))\n print(position_str, end='')\n print('\\b' * len(position_str), end='', flush=True)\n\n\n needBreak = False\n logMessage = \"\"\n fightChar = '2,3'\n troopChar = 'q,w,e,r,t,y,u'\n battles_done = 0\n stepCount = 0\n playAgain = True\n\n\n def farm_quest():\n global stepCount, now, playAgain, needBreak, logMessage, fightChar, troopChar, battles_done, battles_done_text, play_again_color, play_again_x, play_again_y, start_color, start_x, start_y\n needBreak = False\n playAgain = True\n fight_start = datetime.datetime.now()\n battles_done = 0\n pyautogui.click(play_x, play_y)\n while True:\n now = datetime.datetime.now()\n if needBreak:\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" stopping.\\n\")\n break\n try:\n to_battle = int(wasteEntry.get('1.0', tkinter.END))\n except:\n try:\n to_battle = wasteEntry.get().split(',')\n except:\n to_battle = \"s\"\n # print(to_battle)\n # print(battles_done)\n if not isinstance(to_battle,(list,)) and is_number(to_battle):\n # print(\"its a number\")\n if battles_done == to_battle:\n # print(\"playing sound\")\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Reached battles, stopping.\\n\")\n duration = 500 # millisecond\n freq = 500 # Hz\n winsound.Beep(freq, duration)\n duration = 500 # millisecond\n freq = 37 # Hz\n winsound.Beep(freq, duration)\n duration = 500 # millisecond\n freq = 500 # Hz\n winsound.Beep(freq, duration)\n break\n elif len(to_battle) > 1 and len(to_battle) > stepCount:\n commands = to_battle\n last_step = \"s\"\n for step in commands:\n if needBreak:\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" stopping.\\n\")\n stepCount = 0\n break\n print(step)\n if is_number(last_step):\n while not pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.press(troopChar[0]) # Press first troopChar first\n pyautogui.press(troopChar[random.randint(0, troop_count)])\n time.sleep(1)\n if is_number(step):\n time.sleep(1)\n pyautogui.click(play_x, play_y)\n to_battle = step\n last_step = step\n battles_done = 0\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Doing \" + to_battle + \" battles\\n\")\n while battles_done < int(to_battle):\n if needBreak:\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" stopping.\\n\")\n break\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color) and playAgain:\n fight_duration = datetime.datetime.now() - fight_start\n fight_duration = divmod(fight_duration.days * 86400 + fight_duration.seconds, 60)\n if fight_duration[1] > 0 or fight_duration[0] > 0:\n battles_done += 1\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Fight Duration: \" + str(\n fight_duration[0]) + \"m\" + str(\n fight_duration[1]) + \"s\" + \". 
Troops: \" + troopCharEntry.get() + \"\\n\")\n battles_done_text.set(\"Battles done: \" + str(battles_done))\n logMessage = \"Clicking Play Again.\"\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Clicking Play Again.\\n\")\n pyautogui.click(play_again_x, play_again_y)\n time.sleep(1)\n if pyautogui.pixelMatchesColor(start_x, start_y, start_color):\n logMessage = \"Clicking Start.\"\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Clicking Start.\\n\")\n pyautogui.click(start_x, start_y)\n #time.sleep(1)\n if logMessage != \"Fighting.\":\n logMessage = \"Fighting.\"\n fight_start = datetime.datetime.now()\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Fighting.\\n\")\n #time.sleep(1)\n log_count = int(text.index('end-1c').split('.')[0])\n if log_count > 23:\n text.delete(\"end-1c linestart\", \"end\")\n time.sleep(0.1)\n troopChar = troopCharEntry.get().split(',')\n troop_count = len(troopChar) - 1\n if stepCount < 1:\n pyautogui.press(troopChar[0]) # Press first troopChar first\n pyautogui.press(troopChar[random.randint(0, troop_count)])\n time.sleep(0.1)\n else:\n pyautogui.press(troopCharNew) # Press first troopChar first\n time.sleep(0.1)\n\n fightChar = fightCharEntry.get().split(',')\n fight_count = len(fightChar) - 1\n pyautogui.press(fightChar[random.randint(0, fight_count)])\n stepCount += 1\n elif step == \"next\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(next_chapter_x, next_chapter_y)\n time.sleep(2)\n elif step == \"prev\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(prev_chapter_x, prev_chapter_y)\n time.sleep(2)\n elif step == \"ch1\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, mission1_y)\n time.sleep(2)\n elif step == \"ch2\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, mission2_y)\n time.sleep(2)\n elif step == \"ch3\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, mission3_y)\n time.sleep(2)\n elif step == \"ch4\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, mission4_y)\n time.sleep(2)\n elif step == \"ch5\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, mission5_y)\n time.sleep(2)\n elif step == \"ch6\":\n stepCount += 1\n last_step = \"s\"\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color):\n pyautogui.click(close_x, close_y)\n time.sleep(2)\n pyautogui.click(missions_x, missionF_y)\n time.sleep(2)\n else:\n stepCount += 1\n last_step = \"s\"\n troopCharNew = step\n if pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color) and playAgain:\n fight_duration = 
datetime.datetime.now() - fight_start\n fight_duration = divmod(fight_duration.days * 86400 + fight_duration.seconds, 60)\n if fight_duration[1] > 0 or fight_duration[0] > 0:\n battles_done += 1\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Fight Duration: \" + str(fight_duration[0]) + \"m\" + str(\n fight_duration[1]) + \"s\" + \". Troops: \" + troopCharEntry.get() + \"\\n\")\n battles_done_text.set(\"Battles done: \" + str(battles_done))\n logMessage = \"Clicking Play Again.\"\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Clicking Play Again.\\n\")\n pyautogui.click(play_again_x, play_again_y)\n time.sleep(0.1)\n if pyautogui.pixelMatchesColor(start_x, start_y, start_color):\n logMessage = \"Clicking Start.\"\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Clicking Start.\\n\")\n pyautogui.click(start_x, start_y)\n #time.sleep(1)\n if logMessage != \"Fighting.\":\n logMessage = \"Fighting.\"\n fight_start = datetime.datetime.now()\n text.insert('1.0', now.strftime(\"%H:%M:%S\") + \" Fighting.\\n\")\n #time.sleep(1)\n log_count = int(text.index('end-1c').split('.')[0])\n if log_count > 23:\n text.delete(\"end-1c linestart\", \"end\")\n print(wasteEntry.get())\n if wasteEntry.get() == \"d\" and not pyautogui.pixelMatchesColor(play_again_x, play_again_y, play_again_color) and not pyautogui.pixelMatchesColor(start_x, start_y, start_color):\n fightChar = fightCharEntry.get()\n pyautogui.keyDown('d')\n time.sleep(4)\n pyautogui.keyUp('d')\n pyautogui.press(fightChar)\n time.sleep(0.1)\n troopChar = troopCharEntry.get().split(',')\n troop_count = len(troopChar) - 1\n if stepCount < 1:\n pyautogui.press(troopChar[0]) # Press first troopChar first\n pyautogui.press(troopChar[random.randint(0, troop_count)])\n time.sleep(0.1)\n else:\n pyautogui.press(troopCharNew) # Press first troopChar first\n time.sleep(0.1)\n\n fightChar = fightCharEntry.get().split(',')\n fight_count = len(fightChar) - 1\n pyautogui.press(fightChar[random.randint(0, fight_count)])\n\n\n\n def farm_start():\n wasteEntry.focus()\n thread = threading.Thread(target=farm_quest)\n #thread.daemon = True\n thread.start()\n\n\n #thread2 = Thread(target=color_loc)\n #thread2.start()\n\n\n def stop_farm_quest():\n global needBreak, battles_done\n wasteEntry.focus()\n battles_done = 0\n needBreak = True\n\n\n def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\n try:\n #while True:\n #color_loc()\n #farm_quest()\n top = tkinter.Tk()\n # Buttons\n\n farmButton = tkinter.Button(top, text=\"Farm Quest\", command=farm_start)\n farmButton.pack(side=tkinter.TOP)\n playAgainButton = tkinter.Button(top, text=\"Save Play Again\", command=async_play_again)\n startButton = tkinter.Button(top, text=\"Save Start\", command=async_start)\n playAgainButton.pack(side=tkinter.TOP)\n startButton.pack(side=tkinter.TOP)\n farmButtonStop = tkinter.Button(top, text=\"Stop Farm\", command=stop_farm_quest)\n farmButtonStop.pack(side=tkinter.TOP)\n # Log\n text = tkinter.Text()\n text.insert(tkinter.END, \"Loading...\\n Find top of G in Again with Save Play Again\\n\")\n text.pack(side=tkinter.BOTTOM)\n # Inputs\n wasteEntry = tkinter.Entry(bd=2)\n wasteEntry.pack(side=tkinter.BOTTOM)\n\n fightCharLabel = tkinter.Label(top, text=\"Fight Key\")\n fightCharLabel.pack(side=tkinter.LEFT)\n fightCharEntry = tkinter.Entry(top, bd=2)\n fightCharEntry.pack(side=tkinter.LEFT)\n fightCharEntry.insert(tkinter.END, fightChar)\n troopCharLabel = tkinter.Label(top, text=\"Troop Key\")\n 
troopCharLabel.pack(side=tkinter.RIGHT)\n troopCharEntry = tkinter.Entry(top, bd=2)\n troopCharEntry.pack(side=tkinter.RIGHT)\n troopCharEntry.insert(tkinter.END, troopChar)\n battles_done_text = tkinter.StringVar()\n battles_done_text.set(\"Battles done: \")\n fightCountLabel = tkinter.Label(top, textvariable=battles_done_text)\n fightCountLabel.pack(side=tkinter.LEFT, padx=90)\n # Mainloop\n top.mainloop()\n\n except KeyboardInterrupt:\n print(\"Done\")\n\n\nif __name__ == '__main__':\n sys.setrecursionlimit(100000)\n threading.stack_size(200000000)\n thread = threading.Thread(target=Program)\n thread.start()\n\n", "sub_path": "ews.py", "file_name": "ews.py", "file_ext": "py", "file_size_in_byte": 20041, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pyautogui.FAILSAFE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 66, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 77, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 77, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 78, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 79, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 79, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 81, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 81, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 82, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 83, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 84, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 86, "usage_type": "call"}, {"api_name": "pyautogui.position", "line_number": 87, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 88, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 101, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 101, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 105, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 105, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 107, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 107, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 108, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 109, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 110, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 111, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 112, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 113, "usage_type": "call"}, {"api_name": 
"datetime.datetime", "line_number": 113, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 114, "usage_type": "call"}, {"api_name": "pyautogui.position", "line_number": 115, "usage_type": "call"}, {"api_name": "pyautogui.screenshot", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 128, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pyautogui.screenshot", "line_number": 134, "usage_type": "call"}, {"api_name": "pyautogui.position", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 156, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 156, "usage_type": "attribute"}, {"api_name": "pyautogui.click", "line_number": 158, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 160, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 160, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 165, "usage_type": "attribute"}, {"api_name": "winsound.Beep", "line_number": 180, "usage_type": "call"}, {"api_name": "winsound.Beep", "line_number": 183, "usage_type": "call"}, {"api_name": "winsound.Beep", "line_number": 186, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 198, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 199, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 200, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 200, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 201, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 203, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 204, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 213, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 214, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 214, "usage_type": "attribute"}, {"api_name": "pyautogui.click", "line_number": 224, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 225, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 226, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 229, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 233, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 239, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 243, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 244, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 244, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 245, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 247, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 248, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 252, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 252, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 257, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 258, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 259, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 260, "usage_type": "call"}, {"api_name": "time.sleep", 
"line_number": 261, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 265, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 266, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 267, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 268, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 269, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 273, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 274, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 275, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 276, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 277, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 281, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 282, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 283, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 284, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 285, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 289, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 290, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 291, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 292, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 293, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 297, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 298, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 299, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 300, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 301, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 305, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 306, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 307, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 308, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 309, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 313, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 314, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 315, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 316, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 317, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 322, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 323, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 323, "usage_type": "attribute"}, {"api_name": "pyautogui.click", "line_number": 332, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 333, "usage_type": "call"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 334, "usage_type": "call"}, {"api_name": "pyautogui.click", "line_number": 337, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 341, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 341, "usage_type": "attribute"}, {"api_name": "pyautogui.pixelMatchesColor", "line_number": 348, "usage_type": "call"}, {"api_name": "pyautogui.keyDown", "line_number": 350, 
"usage_type": "call"}, {"api_name": "time.sleep", "line_number": 351, "usage_type": "call"}, {"api_name": "pyautogui.keyUp", "line_number": 352, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 353, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 354, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 358, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 359, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 359, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 360, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 362, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 363, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 367, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 367, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 373, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 401, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 404, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 405, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 406, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 407, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 408, "usage_type": "attribute"}, {"api_name": "tkinter.TOP", "line_number": 409, "usage_type": "attribute"}, {"api_name": "tkinter.Button", "line_number": 410, "usage_type": "call"}, {"api_name": "tkinter.TOP", "line_number": 411, "usage_type": "attribute"}, {"api_name": "tkinter.Text", "line_number": 413, "usage_type": "call"}, {"api_name": "tkinter.END", "line_number": 414, "usage_type": "attribute"}, {"api_name": "tkinter.BOTTOM", "line_number": 415, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 417, "usage_type": "call"}, {"api_name": "tkinter.BOTTOM", "line_number": 418, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 420, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 421, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 422, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 423, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 424, "usage_type": "attribute"}, {"api_name": "tkinter.Label", "line_number": 425, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 426, "usage_type": "attribute"}, {"api_name": "tkinter.Entry", "line_number": 427, "usage_type": "call"}, {"api_name": "tkinter.RIGHT", "line_number": 428, "usage_type": "attribute"}, {"api_name": "tkinter.END", "line_number": 429, "usage_type": "attribute"}, {"api_name": "tkinter.StringVar", "line_number": 430, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 432, "usage_type": "call"}, {"api_name": "tkinter.LEFT", "line_number": 433, "usage_type": "attribute"}, {"api_name": "sys.setrecursionlimit", "line_number": 442, "usage_type": "call"}, {"api_name": "threading.stack_size", "line_number": 443, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 444, "usage_type": "call"}]} +{"seq_id": "211310013", "text": "from app.dao.book_dao import BookDao\nfrom app.model.sale import Sale\n\nfrom typing import List\n\n\nclass SaleDao:\n\n list_sales: List[Sale] = []\n\n def __init__(self) -> None:\n self._sale: List = []\n\n def add(self, sale: Sale) -> None:\n if not isinstance(sale, 
Sale):\n raise TypeError('The argument passed is not of type Sale')\n\n self._sale.append(sale)\n self.list_sales.append(sale)\n\n def checkout(self) -> None:\n informations = '\\n--- Sale completed successfully ---\\n'\n total_sale = 0\n \n for item in self._sale:\n total_sale += item.price_sale*item.quantity_sale\n informations += str(item)\n \n informations += f'\\nTotal price: R$ {total_sale:.2f}'\n self._sale = []\n \n print(informations)\n", "sub_path": "app/dao/sale_dao.py", "file_name": "sale_dao.py", "file_ext": "py", "file_size_in_byte": 822, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}, {"api_name": "app.model.sale.Sale", "line_number": 9, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 12, "usage_type": "name"}, {"api_name": "app.model.sale.Sale", "line_number": 14, "usage_type": "name"}, {"api_name": "app.model.sale.Sale", "line_number": 15, "usage_type": "argument"}]} +{"seq_id": "252700170", "text": "import os\nimport torchvision\nimport torch.utils.data\nimport torch.nn.init\nimport torch.optim as optim\nfrom GRU_cell import GRUCell, LSTMCell\nimport numpy as np\nimport datetime\nimport dateutil.tz\nimport argparse\nimport data\nfrom modules import *\n\ndef write_log(log, log_path):\n f = open(log_path, mode='a')\n f.write(str(log))\n f.write('\\n')\n f.close()\n\ndef concat(*data_list):\n return torch.cat(data_list, 1)\n\nclass bouncing_balls(torch.utils.data.Dataset):\n def __init__(self, path, size):\n self.path = path\n self.length = size\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n data = np.load(self.path + '/%d.npy' % (idx))\n\n return torch.from_numpy(data)\n\nclass Sprites(torch.utils.data.Dataset):\n def __init__(self, path, size):\n self.path = path\n self.length = size\n\n def __len__(self):\n return self.length\n\n def __getitem__(self, idx):\n return torch.load(self.path + '/%d.sprite' % (idx + 1))\n\n\nclass FullQDisentangledVAE(nn.Module):\n def __init__(self, frames, z_dim, conv_dim, hidden_dim, block_size, channel, shape,dataset, device):\n super(FullQDisentangledVAE, self).__init__()\n self.z_dim = z_dim\n self.frames = frames\n self.conv_dim = conv_dim\n self.hidden_dim = hidden_dim\n self.device = device\n self.dataset = dataset\n self.block_size = block_size\n\n self.z_lstm = nn.LSTM(self.hidden_dim, self.hidden_dim, 1, batch_first=True)\n #self.z_lstm = nn.LSTM(self.hidden_dim, self.hidden_dim // 2, 1, bidirectional=True, batch_first=True)\n self.z_post_out = nn.Linear(self.hidden_dim, self.z_dim * 2)\n\n self.z_prior_out = nn.Linear(self.hidden_dim, self.z_dim * 2)\n\n self.z_to_c_fwd_list = [\n GRUCell(input_size=self.z_dim, hidden_size=self.hidden_dim // self.block_size).to(self.device)\n for i in range(self.block_size)]\n\n # observation encoder / decoder\n self.enc_obs = Encoder(feat_size=self.hidden_dim, output_size=self.hidden_dim, channel=channel, shape=shape)\n self.dec_obs = Decoder(input_size=self.z_dim, feat_size=self.hidden_dim, channel=channel, dataset=self.dataset, shape=shape)\n\n def reparameterize(self, mean, logvar, random_sampling=True):\n # Reparametrization occurs only if random sampling is set to true, otherwise mean is returned\n if random_sampling is True:\n eps = torch.randn_like(logvar)\n std = torch.exp(0.5 * logvar)\n z = mean + eps * std\n return z\n else:\n return mean\n\n def encode_z(self, x):\n batch_size = x.shape[0]\n seq_size = 
x.shape[1]\n each_block_size = self.z_dim//self.block_size\n lstm_out, _ = self.z_lstm(x)\n #lstm_out, _ = self.z_rnn(lstm_out)\n\n z_post_mean_list = []\n z_post_lar_list = []\n z_prior_mean_list = []\n z_prior_lar_list = []\n zt_obs_list = []\n\n zt_1_post = self.z_post_out(lstm_out[:,0])\n zt_1_mean = zt_1_post[:, :self.z_dim]\n zt_1_lar = zt_1_post[:, self.z_dim:]\n\n post_z_1 = self.reparameterize(zt_1_mean, zt_1_lar, self.training)\n\n #zt_1 = torch.zeros(batch_size, self.z_dim).to(device)\n #z_fwd = post_z_1.new_zeros(batch_size, self.z_dim)\n z_fwd_list = [torch.zeros(batch_size, self.hidden_dim // self.block_size).to(self.device) for i in\n range(self.block_size)]\n zt_obs_list.append(post_z_1)\n\n for t in range(1, seq_size):\n # posterior over ct, q(ct|ot,ft)\n z_post_out = self.z_post_out(lstm_out[:, t])\n zt_post_mean = z_post_out[:, :self.z_dim]\n zt_post_lar = z_post_out[:, self.z_dim:]\n\n z_post_mean_list.append(zt_post_mean)\n z_post_lar_list.append(zt_post_lar)\n z_post_sample = self.reparameterize(zt_post_mean, zt_post_lar, self.training)\n\n\n for fwd_t in range(self.block_size):\n # prior over ct of each block, ct_i~p(ct_i|zt-1_i)\n '''\n if fwd_t == 0:\n zt_1_tmp = concat(post_z_1[:, 0 * each_block_size:1 * each_block_size],\n torch.zeros(batch_size, (self.block_size - 1) * each_block_size).to(self.device))\n elif fwd_t == (self.block_size - 1):\n zt_1_tmp = concat(torch.zeros(batch_size, (self.block_size - 2) * each_block_size).to(self.device),\n post_z_1[:, (fwd_t - 1) * each_block_size:(fwd_t + 1) * each_block_size])\n else:\n zt_1_tmp = concat(post_z_1[:, (fwd_t - 1) * each_block_size: (fwd_t + 1) * each_block_size],\n torch.zeros(batch_size, (self.block_size - 2) * each_block_size).to(self.device))\n '''\n if fwd_t == 0:\n zt_1_tmp = concat(z_post_sample[:, 0 * each_block_size:1 * each_block_size],\n torch.zeros(batch_size, (self.block_size - 1) * each_block_size).to(self.device))\n elif fwd_t == (self.block_size - 1):\n zt_1_tmp = concat(torch.zeros(batch_size, (self.block_size - 1) * each_block_size).to(self.device),\n z_post_sample[:, fwd_t * each_block_size:(fwd_t + 1) * each_block_size])\n else:\n zt_1_tmp = concat(torch.zeros(batch_size, fwd_t * each_block_size).to(self.device),\n z_post_sample[:, fwd_t * each_block_size: (fwd_t + 1) * each_block_size],\n torch.zeros(batch_size, (self.block_size - 1-fwd_t) * each_block_size).to(self.device))\n z_fwd_list[fwd_t] = self.z_to_c_fwd_list[fwd_t](zt_1_tmp, z_fwd_list[fwd_t])#,w1=wt1[:,fwd_t].view(-1,1))\n\n z_fwd_all = torch.stack(z_fwd_list, dim=2).view(batch_size, self.hidden_dim) # .mean(dim=2)\n # prior over ct of each block, ct_i~p(ct_i|zt-1_i)\n #z_fwd = self.z_to_z_fwd(z_post_sample, z_fwd)\n\n # p(xt|zt)\n zt_obs_list.append(z_post_sample)\n z_prior_fwd = self.z_prior_out(z_fwd_all)\n\n z_fwd_latent_mean = z_prior_fwd[:, :self.z_dim]\n z_fwd_latent_lar = z_prior_fwd[:, self.z_dim:]\n\n # store the prior of ct_i\n z_prior_mean_list.append(z_fwd_latent_mean)\n z_prior_lar_list.append(z_fwd_latent_lar)\n\n post_z_1 = z_post_sample\n\n zt_obs_list = torch.stack(zt_obs_list, dim=1)\n z_post_mean_list = torch.stack(z_post_mean_list, dim=1)\n z_post_lar_list = torch.stack(z_post_lar_list, dim=1)\n z_prior_mean_list = torch.stack(z_prior_mean_list, dim=1)\n z_prior_lar_list = torch.stack(z_prior_lar_list, dim=1)\n\n return zt_1_mean, zt_1_lar, z_post_mean_list, z_post_lar_list, z_prior_mean_list, z_prior_lar_list, zt_obs_list\n\n def forward(self, x):\n num_samples = x.shape[0]\n seq_len = x.shape[1]\n conv_x = 
self.enc_obs(x.view(-1, *x.size()[2:])).view(num_samples, seq_len, -1)\n zt_1_mean, zt_1_lar, post_zt_mean, post_zt_lar, prior_zt_mean, prior_zt_lar, z = self.encode_z(conv_x)\n recon_x = self.dec_obs(z.view(num_samples * seq_len, -1)).view(num_samples, seq_len, *x.size()[2:])\n return zt_1_mean, zt_1_lar, post_zt_mean, post_zt_lar, prior_zt_mean, prior_zt_lar, z, recon_x\n\ndef loss_fn(dataset, original_seq, recon_seq, zt_1_mean, zt_1_lar,z_post_mean, z_post_logvar, z_prior_mean, z_prior_logvar):\n\n if dataset == 'lpc':\n obs_cost = F.mse_loss(recon_seq,original_seq, size_average=False)\n elif dataset == 'moving_mnist' or dataset=='bouncing_balls':\n obs_cost = F.binary_cross_entropy(recon_seq, original_seq, size_average=False) #binary_cross_entropy\n batch_size = recon_seq.shape[0]\n\n # compute kl related to states, kl(q(ct|ot,ft)||p(ct|zt-1))\n kld_z0 = -0.5 * torch.sum(1 + zt_1_lar - torch.pow(zt_1_mean, 2) - torch.exp(zt_1_lar))\n z_post_var = torch.exp(z_post_logvar)\n z_prior_var = torch.exp(z_prior_logvar)\n kld_z = 0.5 * torch.sum(\n z_prior_logvar - z_post_logvar + ((z_post_var + torch.pow(z_post_mean - z_prior_mean, 2)) / z_prior_var) - 1)\n\n return (obs_cost + kld_z + kld_z0 )/batch_size , (kld_z + kld_z0 )/batch_size\n\nclass Trainer(object):\n def __init__(self, model, device, train, test, epochs, batch_size, learning_rate, nsamples,\n sample_path, recon_path, checkpoints, log_path, grad_clip, channel):\n self.train = train\n self.test = test\n self.start_epoch = 0\n self.epochs = epochs\n self.device = device\n self.batch_size = batch_size\n self.model = model\n self.grad_clip = grad_clip\n self.model.to(device)\n self.learning_rate = learning_rate\n self.checkpoints = checkpoints\n self.optimizer = optim.Adam(self.model.parameters(), self.learning_rate, weight_decay=1e-4)\n self.samples = nsamples\n self.sample_path = sample_path\n self.recon_path = recon_path\n self.log_path = log_path\n self.epoch_losses = []\n self.channel = channel\n\n def save_checkpoint(self, epoch):\n torch.save({\n 'epoch': epoch + 1,\n 'state_dict': self.model.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'losses': self.epoch_losses},\n self.checkpoints)\n\n def load_checkpoint(self):\n try:\n print(\"Loading Checkpoint from '{}'\".format(self.checkpoints))\n checkpoint = torch.load(self.checkpoints)\n self.start_epoch = checkpoint['epoch']\n self.model.load_state_dict(checkpoint['state_dict'])\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.epoch_losses = checkpoint['losses']\n print(\"Resuming Training From Epoch {}\".format(self.start_epoch))\n except:\n print(\"No Checkpoint Exists At '{}'. Start Fresh Training\".format(self.checkpoints))\n self.start_epoch = 0\n\n def sample_frames(self, epoch, sample):\n with torch.no_grad():\n zt_dec = []\n each_block_size = self.model.z_dim//self.model.block_size\n len = sample.shape[0]\n #len = self.samples\n x = self.model.enc_obs(sample.view(-1, *sample.size()[2:])).view(1, sample.shape[1], -1)\n lstm_out, _ = self.model.z_lstm(x)\n #lstm_out, _ = self.model.z_rnn(lstm_out)\n\n\n zt_1_post = self.model.z_post_out(lstm_out[:, 0])\n zt_1_mean = zt_1_post[:, :self.model.z_dim]\n zt_1_lar = zt_1_post[:, self.model.z_dim:]\n\n zt_1 = self.model.reparameterize(zt_1_mean, zt_1_lar, self.model.training)\n\n #zt_1 = [Normal(torch.zeros(self.model.z_dim).to(self.device), torch.ones(self.model.z_dim).to(self.device)).rsample() for i in range(len)]\n #zt_1 = torch.stack(zt_1, dim=0)\n\n #z_fwd = zt_1.new_zeros(len, 
self.model.z_dim)\n z_fwd_list = [torch.zeros(len, self.model.hidden_dim // self.model.block_size).to(self.device) for i in\n range(self.model.block_size)]\n zt_dec.append(zt_1)\n\n for t in range(1, x.shape[1]):\n\n for fwd_t in range(self.model.block_size):\n # prior over ct of each block, ct_i~p(ct_i|zt-1_i)\n '''\n if fwd_t == 0:\n zt_1_tmp = concat(zt_1[:, 0 * each_block_size:1 * each_block_size],\n torch.zeros(len, (self.model.block_size - 1) * each_block_size).to(\n self.device))\n elif fwd_t == (self.model.block_size - 1):\n zt_1_tmp = concat(\n torch.zeros(len, (self.model.block_size - 2) * each_block_size).to(self.device),\n zt_1[:, (fwd_t - 1) * each_block_size:(fwd_t + 1) * each_block_size])\n else:\n zt_1_tmp = concat(\n zt_1[:, (fwd_t - 1) * each_block_size: (fwd_t + 1) * each_block_size],\n torch.zeros(len, (self.model.block_size - 2) * each_block_size).to(self.device))\n '''\n if fwd_t == 0:\n zt_1_tmp = concat(zt_1[:, 0 * each_block_size:1 * each_block_size],\n torch.zeros(len, (self.model.block_size - 1) * each_block_size).to(self.device))\n elif fwd_t == (self.model.block_size - 1):\n zt_1_tmp = concat(torch.zeros(len, (self.model.block_size - 1) * each_block_size).to(self.device),\n zt_1[:, fwd_t * each_block_size:(fwd_t + 1) * each_block_size])\n else:\n zt_1_tmp = concat(torch.zeros(len, fwd_t * each_block_size).to(self.device),\n zt_1[:, fwd_t * each_block_size: (fwd_t + 1) * each_block_size],\n torch.zeros(len, (self.model.block_size - 1 - fwd_t) * each_block_size).to(self.device))\n z_fwd_list[fwd_t] = self.model.z_to_c_fwd_list[fwd_t](zt_1_tmp,\n z_fwd_list[fwd_t]) # ,w1=wt1[:,fwd_t].view(-1,1))\n\n # prior over ct of each block, ct_i~p(ct_i|zt-1_i)\n #z_fwd = self.model.z_to_z_fwd(zt_1, z_fwd)\n z_fwd_all = torch.stack(z_fwd_list, dim=2).view(len, self.model.hidden_dim) # .mean(dim=2)\n z_prior_fwd = self.model.z_prior_out(z_fwd_all)\n\n z_fwd_latent_mean = z_prior_fwd[:, :self.model.z_dim]\n z_fwd_latent_lar = z_prior_fwd[:, self.model.z_dim:]\n\n zt = self.model.reparameterize(z_fwd_latent_mean, z_fwd_latent_lar, self.model.training)\n zt_dec.append(zt)\n zt_1 = zt\n\n zt_dec = torch.stack(zt_dec, dim=1)\n recon_x = self.model.dec_obs(zt_dec.view(len*self.model.frames,-1)).view(len, self.model.frames,-1)\n recon_x = recon_x.view(len*x.shape[1], self.channel, sample.shape[3], sample.shape[3])\n torchvision.utils.save_image(recon_x, '%s/epoch%d.png' % (self.sample_path, epoch))\n\n def recon_frame(self, epoch, original):\n with torch.no_grad():\n _, _, _, _, _, _,_, recon = self.model(original)\n image = torch.cat((original, recon), dim=0)\n image = image.view(2*original.shape[1], self.channel, original.shape[3], original.shape[3])\n torchvision.utils.save_image(image, '%s/epoch%d.png' % (self.recon_path, epoch))\n\n def train_model(self):\n self.model.train()\n sample = iter(self.test).next().to(self.device)\n self.sample_frames(0 + 1, sample)\n self.recon_frame(0 + 1, sample)\n for epoch in range(self.start_epoch, self.epochs):\n losses = []\n kl_loss = []\n write_log(\"Running Epoch : {}\".format(epoch + 1), self.log_path)\n for i, data in enumerate(self.train):\n data = data.to(self.device)\n self.optimizer.zero_grad()\n zt_1_mean, zt_1_lar, post_zt_mean, post_zt_lar, prior_zt_mean, prior_zt_lar, z, recon_x = self.model(data)\n loss, kl = loss_fn(self.model.dataset, data, recon_x, zt_1_mean, zt_1_lar, post_zt_mean, post_zt_lar, prior_zt_mean,\n prior_zt_lar)\n loss.backward()\n write_log('mse loss is %f, kl loss is %f'%(loss, kl), self.log_path)\n 
print('index is %d, mse loss is %f, kl loss is %f'%(i, loss, kl))\n if self.grad_clip > 0.0:\n nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)\n self.optimizer.step()\n kl_loss.append(kl.item())\n losses.append(loss.item())\n meanloss = np.mean(losses)\n klloss = np.mean(kl_loss)\n self.epoch_losses.append((meanloss,klloss))\n write_log(\"Epoch {} : Average Loss: {}, KL loss :{}\".format(epoch + 1, meanloss, klloss), self.log_path)\n #self.save_checkpoint(epoch)\n self.model.eval()\n \n sample = iter(self.test).next().to(self.device)\n self.sample_frames(epoch + 1, sample)\n self.recon_frame(epoch + 1, sample)\n #self.style_transfer(epoch + 1)\n self.model.train()\n print(\"Training is complete\")\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description=\"vanilla_vae\")\n parser.add_argument('--seed', type=int, default=111)\n # method\n parser.add_argument('--method', type=str, default='Vanilla')\n # dataset\n parser.add_argument('--dset_name', type=str, default='bouncing_balls') #moving_mnist, lpc, bouncing_balls\n # state size\n parser.add_argument('--z-dim', type=int, default=36) # 72 144\n parser.add_argument('--hidden-dim', type=int, default=216) # 216 252\n parser.add_argument('--conv-dim', type=int, default=256) # 256 512\n parser.add_argument('--block_size', type=int, default=3) # 3 4\n # data size\n parser.add_argument('--batch-size', type=int, default=32)\n parser.add_argument('--frame-size', type=int, default=20)\n parser.add_argument('--nsamples', type=int, default=2)\n\n # optimization\n parser.add_argument('--learn-rate', type=float, default=0.0005)\n parser.add_argument('--grad-clip', type=float, default=0.0)\n parser.add_argument('--max-epochs', type=int, default=100)\n parser.add_argument('--gpu_id', type=int, default=1)\n\n FLAGS = parser.parse_args()\n np.random.seed(FLAGS.seed)\n torch.manual_seed(FLAGS.seed)\n torch.cuda.manual_seed(FLAGS.seed)\n device = torch.device('cuda:%d'%(FLAGS.gpu_id) if torch.cuda.is_available() else 'cpu')\n\n if FLAGS.dset_name == 'lpc':\n sprite = Sprites('./dataset/lpc-dataset/train/', 6000)\n sprite_test = Sprites('./dataset/lpc-dataset/test/', 300)\n train_loader = torch.utils.data.DataLoader(sprite, batch_size=FLAGS.batch_size, shuffle=True, num_workers=4)\n test_loader = torch.utils.data.DataLoader(sprite_test, batch_size=1, shuffle=FLAGS, num_workers=4)\n channel = 3\n shape =64\n elif FLAGS.dset_name == 'moving_mnist':\n FLAGS.dset_path = os.path.join('./datasets', FLAGS.dset_name)\n train_loader, test_loader = data.get_data_loader(FLAGS, True)\n channel = 1\n shape=64\n elif FLAGS.dset_name == 'bouncing_balls':\n sprite = bouncing_balls('./bouncing_ball/dataset/', 6000)\n sprite_test = bouncing_balls('./bouncing_ball/dataset/', 300)\n train_loader = torch.utils.data.DataLoader(sprite, batch_size=FLAGS.batch_size, shuffle=True, num_workers=4)\n test_loader = torch.utils.data.DataLoader(sprite_test, batch_size=1, shuffle=True, num_workers=4)\n channel = 3\n shape = 32\n\n\n vae = FullQDisentangledVAE(frames=FLAGS.frame_size, z_dim=FLAGS.z_dim, hidden_dim=FLAGS.hidden_dim, conv_dim=FLAGS.conv_dim, block_size=FLAGS.block_size,channel=channel, shape=shape, dataset=FLAGS.dset_name, device=device)\n # set writer\n starttime = datetime.datetime.now()\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n time_dir = now.strftime('%Y_%m_%d_%H_%M_%S')\n base_path = './%s/%s/%s'%(FLAGS.dset_name, FLAGS.method, time_dir)\n model_path = '%s/model' % (base_path)\n log_recon = '%s/recon' % (base_path)\n 
log_sample = '%s/sample' % (base_path)\n log_path = '%s/log_info.txt' % (base_path)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n if not os.path.exists(log_recon):\n os.makedirs(log_recon)\n if not os.path.exists(log_sample):\n os.makedirs(log_sample)\n\n write_log(vae, log_path)\n write_log(FLAGS, log_path)\n\n trainer = Trainer(vae, device, train_loader, test_loader, epochs=FLAGS.max_epochs, batch_size=FLAGS.batch_size,\n learning_rate=FLAGS.learn_rate, checkpoints='%s/%s-disentangled-vae.model'%(model_path, FLAGS.method), nsamples=FLAGS.nsamples,\n sample_path=log_sample,\n recon_path=log_recon, log_path=log_path, grad_clip=FLAGS.grad_clip, channel=channel)\n #trainer.load_checkpoint()\n trainer.train_model()\n endtime = datetime.datetime.now()\n seconds = (endtime - starttime).seconds\n hours = seconds // 3600\n minutes = (seconds % 3600) // 60\n second = (seconds % 3600) % 60\n print((endtime - starttime))\n timeStr = \"running time: \" + str(hours) + ' hours ' + str(minutes) + ' minutes ' + str(second) + \" seconds\"\n write_log(timeStr, log_path)\n", "sub_path": "vanilla_vae.py", "file_name": "vanilla_vae.py", "file_ext": "py", "file_size_in_byte": 20966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.utils.data.cat", "line_number": 21, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 21, "usage_type": "name"}, {"api_name": "torch.utils.data.utils", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 23, "usage_type": "name"}, {"api_name": "numpy.load", "line_number": 32, "usage_type": "call"}, {"api_name": "torch.utils.data.from_numpy", "line_number": 34, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 34, "usage_type": "name"}, {"api_name": "torch.utils.data.utils", "line_number": 36, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 36, "usage_type": "name"}, {"api_name": "torch.utils.data.load", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 45, "usage_type": "name"}, {"api_name": "GRU_cell.GRUCell", "line_number": 66, "usage_type": "call"}, {"api_name": "torch.utils.data.randn_like", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 76, "usage_type": "name"}, {"api_name": "torch.utils.data.exp", "line_number": 77, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 134, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 134, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 139, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 139, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 141, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", 
"line_number": 161, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 161, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 162, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 162, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 165, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 165, "usage_type": "name"}, {"api_name": "torch.utils.data.sum", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 186, "usage_type": "name"}, {"api_name": "torch.utils.data.pow", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.utils.data.exp", "line_number": 186, "usage_type": "call"}, {"api_name": "torch.utils.data.exp", "line_number": 187, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 187, "usage_type": "name"}, {"api_name": "torch.utils.data.exp", "line_number": 188, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 188, "usage_type": "name"}, {"api_name": "torch.utils.data.sum", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 189, "usage_type": "name"}, {"api_name": "torch.utils.data.pow", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 208, "usage_type": "name"}, {"api_name": "torch.utils.data.save", "line_number": 217, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 217, "usage_type": "name"}, {"api_name": "torch.utils.data.load", "line_number": 227, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 227, "usage_type": "name"}, {"api_name": "torch.utils.data.no_grad", "line_number": 238, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 238, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 258, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 258, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 282, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 282, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 284, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 284, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 287, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 287, "usage_type": "name"}, {"api_name": "torch.utils.data.zeros", "line_number": 289, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 289, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 295, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 295, "usage_type": "name"}, {"api_name": "torch.utils.data.stack", "line_number": 305, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 305, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 308, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 
308, "usage_type": "attribute"}, {"api_name": "torch.utils.data.no_grad", "line_number": 311, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 311, "usage_type": "name"}, {"api_name": "torch.utils.data.cat", "line_number": 313, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 313, "usage_type": "name"}, {"api_name": "torchvision.utils.save_image", "line_number": 315, "usage_type": "call"}, {"api_name": "torchvision.utils", "line_number": 315, "usage_type": "attribute"}, {"api_name": "data.to", "line_number": 327, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 340, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 341, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 357, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 380, "usage_type": "attribute"}, {"api_name": "torch.utils.data.manual_seed", "line_number": 381, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 381, "usage_type": "name"}, {"api_name": "torch.utils.data.cuda.manual_seed", "line_number": 382, "usage_type": "call"}, {"api_name": "torch.utils.data.cuda", "line_number": 382, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 382, "usage_type": "name"}, {"api_name": "torch.utils.data.device", "line_number": 383, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 383, "usage_type": "name"}, {"api_name": "torch.utils.data.cuda.is_available", "line_number": 383, "usage_type": "call"}, {"api_name": "torch.utils.data.cuda", "line_number": 383, "usage_type": "attribute"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 388, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 388, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 388, "usage_type": "name"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 389, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 389, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 389, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path", "line_number": 393, "usage_type": "attribute"}, {"api_name": "data.get_data_loader", "line_number": 394, "usage_type": "call"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 400, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 400, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 400, "usage_type": "name"}, {"api_name": "torch.utils.data.utils.data.DataLoader", "line_number": 401, "usage_type": "call"}, {"api_name": "torch.utils.data.utils", "line_number": 401, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 401, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 408, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 408, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 409, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 409, "usage_type": "attribute"}, {"api_name": "dateutil.tz.tz.tzlocal", "line_number": 409, "usage_type": "call"}, {"api_name": "dateutil.tz.tz", "line_number": 409, "usage_type": "attribute"}, {"api_name": "dateutil.tz", "line_number": 409, "usage_type": "name"}, 
{"api_name": "os.path.exists", "line_number": 416, "usage_type": "call"}, {"api_name": "os.path", "line_number": 416, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 417, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 418, "usage_type": "call"}, {"api_name": "os.path", "line_number": 418, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 419, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 420, "usage_type": "call"}, {"api_name": "os.path", "line_number": 420, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 421, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 432, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 432, "usage_type": "attribute"}]} +{"seq_id": "351540852", "text": "# Collect entropy-based reward policies.\n\n# python walker_collect_sac.py --env=\"Walker2d-v2\" --exp_name=test --T=10000 --n=20 --l=2 --hid=300 --epochs=16 --episodes=16 --gaussian --reduce_dim=5 --fully_corrective\n\nimport sys\nimport os\nsys.path.append(os.getenv(\"HOME\") + '/maxent')\nsys.path.append(os.getenv(\"HOME\") + '/spinningup')\n\nimport time\nfrom datetime import datetime\nimport random\n\nimport numpy as np\nfrom tabulate import tabulate\n\nimport gym\nfrom gym import wrappers\nimport tensorflow as tf\n\nimport utils\nimport walker_utils\nimport plotting\nfrom walker_soft_actor_critic import WalkerSoftActorCritic\nfrom experience_buffer import ExperienceBuffer\n\nargs = utils.get_args()\n\nfrom spinup.utils.run_utils import setup_logger_kwargs\n\ndef compute_states_visited_xy(env, policies, T, n, norm=[],\n N=20, initial_state=[], baseline=False):\n \n states_visited_xy = np.zeros(T*n)\n max_idx = len(policies) - 1\n \n for it in range(N): \n p_xy = np.zeros(shape=(tuple(walker_utils.num_states_2d))) \n cumulative_states_visited_xy = 0\n \n # average results over n rollouts\n for iteration in range(n):\n\n env.reset()\n if len(initial_state) > 0:\n qpos = initial_state[:len(walker_utils.qpos)]\n qvel = initial_state[len(walker_utils.qpos):]\n env.env.set_state(qpos, qvel)\n obs = walker_utils.get_state(env, env.env._get_obs())\n\n for t in range(T):\n action = np.zeros(shape=(1,walker_utils.action_dim))\n idx = random.randint(0, max_idx)\n \n if idx == 0 or baseline:\n action = env.action_space.sample()\n else:\n action = policies[idx].get_action(obs, deterministic=args.deterministic)\n\n # Count the cumulative number of new states visited as a function of t.\n obs, _, done, _ = env.step(action)\n obs = walker_utils.get_state(env, obs)\n\n # if this is the first time you are seeing this state, increment.\n if p_xy[tuple(walker_utils.discretize_state_2d(obs, norm, env))] == 0:\n cumulative_states_visited_xy += 1\n \n step = iteration*T + t\n states_visited_xy[step] += cumulative_states_visited_xy\n p_xy[tuple(walker_utils.discretize_state_2d(obs, norm, env))] += 1\n\n if done: # CRITICAL: ignore done signal\n done = False\n \n env.close()\n states_visited_xy /= float(N)\n return states_visited_xy\n\ndef select_action(policies, weights, env, obs):\n\n if len(weights) != len(policies):\n print(\"Weights array is wrong dimension -- using uniform weighting\")\n weights = np.ones(len(policies))/float(len(policies))\n \n indexes = np.arange(len(policies))\n idx = np.random.choice(indexes, p=weights)\n \n if idx == 0:\n action = env.action_space.sample()\n else:\n action = policies[idx].get_action(obs, deterministic=args.deterministic)\n \n return 
action\n\ndef execute_one_rollout(policies, weights, env, start_obs, \n                        T, data, norm, wrapped=False, video_dir=''):\n    obs = start_obs\n    \n    p, p_xy, cumulative_states_visited, states_visited, \\\n    cumulative_states_visited_xy, states_visited_xy, random_initial_state = data\n    \n    random_T = np.random.randint(0, T)\n\n    uid = 0\n    \n    for t in range(T):\n\n        if wrapped:\n            print(t)\n\n        uid = uid + 1\n        \n        action = select_action(policies, weights, env, obs)\n        \n        # Count the cumulative number of new states visited as a function of t.\n        obs, _, done, _ = env.step(action)\n        obs = walker_utils.get_state(env, obs, wrapped)\n\n        # if this is the first time you are seeing this state, increment.\n        if p[tuple(walker_utils.discretize_state(obs, norm, env))] == 0:\n            cumulative_states_visited += 1\n        states_visited.append(cumulative_states_visited)\n        if p_xy[tuple(walker_utils.discretize_state_2d(obs, norm, env))] == 0:\n            cumulative_states_visited_xy += 1\n        states_visited_xy.append(cumulative_states_visited_xy)\n\n        p[tuple(walker_utils.discretize_state(obs, norm, env))] += 1\n        p_xy[tuple(walker_utils.discretize_state_2d(obs, norm, env))] += 1\n\n        if t == random_T:\n            random_initial_state = obs\n\n        if done: # CRITICAL: ignore done signal\n            done = False\n            if wrapped:\n                print(t)\n                env.close()\n                base_env = gym.make('Walker2d-v2')\n                env = wrappers.Monitor(base_env, video_dir+'/%d' % uid)\n                env.reset()\n                uid = uid + 1\n                qpos = obs[:len(walker_utils.qpos)]\n                qvel = obs[len(walker_utils.qpos):]\n                env.unwrapped.set_state(qpos, qvel)\n                d = False\n\n    \n    data = (p, p_xy, cumulative_states_visited, states_visited, \\\n    cumulative_states_visited_xy, states_visited_xy, random_initial_state)\n    \n    return data\n    \n# run a simulation to see how the average policy behaves.\ndef execute_average_policy(env, policies, T, weights,\n                           reward_fn=[], norm=[], initial_state=[], \n                           n=10, render=False, render_steps=0, \n                           video_dir='', epoch=0):\n    \n    p = np.zeros(shape=(tuple(walker_utils.num_states)))\n    p_xy = np.zeros(shape=(tuple(walker_utils.num_states_2d)))\n    \n    cumulative_states_visited = 0\n    states_visited = []\n    cumulative_states_visited_xy = 0\n    states_visited_xy = []\n    \n    random_initial_state = []\n    \n    data = (p, p_xy, cumulative_states_visited, states_visited, \n            cumulative_states_visited_xy, states_visited_xy, \n            random_initial_state)\n    \n    if len(initial_state) == 0:\n        env.reset()\n        initial_state = env.env.state_vector()\n\n    # average results over n rollouts\n    for iteration in range(n):\n        \n        env.reset()\n        \n        # only get a recording of the first iteration\n        if render and iteration == 0:\n            print('recording mixed iteration....')\n            wrapped_env = wrappers.Monitor(env, video_dir)\n            wrapped_env.reset()\n            qpos = initial_state[:len(walker_utils.qpos)]\n            qvel = initial_state[len(walker_utils.qpos):]\n            wrapped_env.unwrapped.set_state(qpos, qvel)\n            obs = walker_utils.get_state(wrapped_env, wrapped_env.unwrapped._get_obs(), wrapped=True)\n            data = execute_one_rollout(policies, weights, wrapped_env, obs, \n                                       T=render_steps, data=data, norm=norm, wrapped=True, video_dir=video_dir)\n        else:\n            obs = walker_utils.get_state(env, env.env._get_obs())\n            data = execute_one_rollout(policies, weights, env, obs, T, data, norm)\n    \n    env.close()\n    \n    # expand saved data\n    p, p_xy, cumulative_states_visited, states_visited, \\\n    cumulative_states_visited_xy, states_visited_xy, random_initial_state = data\n    \n    p /= float(T*n)\n    p_xy /= float(T*n)\n\n    return p, p_xy, random_initial_state, states_visited, states_visited_xy\n
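\n# Note: grad_ent() below turns the (smoothed) state-visitation distribution pt\n# into a reward. With --grad_ent it returns -log(pt), snapping entries above\n# 100 (including inf where pt == 0) to 1000; otherwise it returns 1/(pt + eps).\n# Either way, rarely visited states earn higher reward, which is what pushes\n# each new oracle toward unexplored parts of the state space.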
\n\ndef grad_ent(pt):\n    if args.grad_ent:\n        grad_p = -np.log(pt)\n        grad_p[grad_p > 100] = 1000\n        return grad_p\n\n    eps = 1/np.sqrt(walker_utils.total_state_space)\n    return 1/(pt + eps)\n\ndef init_state(env): \n    env.env.set_state(walker_utils.qpos, walker_utils.qvel)\n    state = env.env.state_vector()\n    return state\n\ndef entropy(pt):\n    utils.log_statement(\"pt size %d\" % pt.size)\n    # entropy = -sum(pt*log(pt))\n    entropy = 0.0\n    for p in pt:\n        if p == 0.0:\n            continue\n        entropy += p*np.log(p)\n    return -entropy\n\n# Main loop of the maximum entropy program. Iteratively collect and learn\n# T policies using policy gradients and a reward function based on entropy.\ndef collect_entropy_policies(env, epochs, T, MODEL_DIR=''):\n\n    video_dir = 'videos/' + args.exp_name\n    \n    direct = os.getcwd()+ '/data/'\n    experiment_directory = direct + args.exp_name\n    print(experiment_directory)\n    \n    print(sys.argv)\n    if not os.path.exists(experiment_directory):\n        os.makedirs(experiment_directory)\n    f = open(experiment_directory+'/args', 'w')\n    f.write(' '.join(sys.argv))\n    f.flush()\n    \n    indexes = [1,5,10,15]\n    states_visited_indexes = [0,5,10,15]\n    \n    states_visited_cumulative = [] \n    states_visited_cumulative_baseline = []\n\n    running_avg_p = np.zeros(shape=(tuple(walker_utils.num_states)))\n    running_avg_p_xy = np.zeros(shape=(tuple(walker_utils.num_states_2d)))\n    running_avg_ent = 0\n    running_avg_ent_xy = 0\n    entropy_of_running_avg_p = 0.\n\n    running_avg_p_baseline = np.zeros(shape=(tuple(walker_utils.num_states)))\n    running_avg_p_baseline_xy = np.zeros(shape=(tuple(walker_utils.num_states_2d)))\n    running_avg_ent_baseline = 0\n    running_avg_ent_baseline_xy = 0\n    entropy_of_running_avg_p_baseline = 0.\n\n    pct_visited = []\n    pct_visited_baseline = []\n    pct_visited_xy = []\n    pct_visited_xy_baseline = []\n\n    running_avg_entropies = []\n    running_avg_entropies_xy = []\n    running_avg_ps_xy = []\n    avg_ps_xy = []\n\n    running_avg_entropies_baseline = []\n    running_avg_entropies_baseline_xy = []\n    running_avg_ps_baseline_xy = []\n    avg_ps_baseline_xy = []\n    \n    running_avg_cumul_entropies = []\n    running_avg_cumul_entropies_baseline = []\n\n    policies = []\n    distributions = []\n    initial_state = init_state(env)\n    \n    prebuf = ExperienceBuffer()\n    env.reset()\n    for t in range(10000): \n        action = env.action_space.sample()\n        obs, reward, done, _ = env.step(action)\n        prebuf.store(walker_utils.get_state(env, obs))\n        if done:\n            env.reset()\n            done = False\n    \n    prebuf.normalize()\n    normalization_factors = prebuf.normalization_factors\n    utils.log_statement(normalization_factors)\n    prebuf = None\n    if not args.gaussian:\n        normalization_factors = []\n\n    reward_fn = np.zeros(shape=(tuple(walker_utils.num_states)))\n\n    for i in range(epochs):\n        utils.log_statement(\"*** ------- EPOCH %d ------- ***\" % i)\n        \n        # clear initial state if applicable.\n        if not args.initial_state:\n            initial_state = []\n        else:\n            utils.log_statement(initial_state)\n        utils.log_statement(\"max reward: \" + str(np.max(reward_fn)))\n\n        logger_kwargs = setup_logger_kwargs(\"model%02d\" % i, data_dir=experiment_directory)\n\n        # Learn policy that maximizes current reward function.\n        print(\"Learning new oracle...\")\n        seed = random.randint(1, 100000)\n        sac = WalkerSoftActorCritic(lambda : gym.make(args.env), reward_fn=reward_fn, xid=i+1,\n            seed=seed, gamma=args.gamma, \n            ac_kwargs=dict(hidden_sizes=[args.hid]*args.l),\n            logger_kwargs=logger_kwargs, \n            
normalization_factors=normalization_factors)\n\n # The first policy is random\n if i == 0:\n sac.soft_actor_critic(epochs=0) \n else:\n sac.soft_actor_critic(epochs=args.episodes, \n initial_state=initial_state, \n start_steps=args.start_steps) \n policies.append(sac)\n \n p, _ = sac.test_agent(T, normalization_factors=normalization_factors)\n distributions.append(p)\n weights = utils.get_weights(distributions)\n\n epoch = 'epoch_%02d' % (i) \n if args.render:\n sac.record(T=args.record_steps, n=1, video_dir=video_dir+'/baseline/'+epoch, on_policy=False) \n sac.record(T=args.record_steps, n=1, video_dir=video_dir+'/entropy/'+epoch, on_policy=True) \n\n # Execute the cumulative average policy thus far.\n # Estimate distribution and entropy.\n print(\"Executing mixed policy...\")\n average_p, average_p_xy, initial_state, states_visited, states_visited_xy = \\\n execute_average_policy(env, policies, T, weights,\n reward_fn=reward_fn, norm=normalization_factors, \n initial_state=initial_state, n=args.n, \n render=args.render, render_steps=args.record_steps,\n video_dir=video_dir+'/mixed/'+epoch, epoch=i)\n \n print(\"Calculating maxEnt entropy...\")\n round_entropy = entropy(average_p.ravel())\n round_entropy_xy = entropy(average_p_xy.ravel())\n \n # Update running averages for maxEnt.\n print(\"Updating maxEnt running averages...\")\n running_avg_ent = running_avg_ent * (i)/float(i+1) + round_entropy/float(i+1)\n running_avg_ent_xy = running_avg_ent_xy * (i)/float(i+1) + round_entropy_xy/float(i+1)\n running_avg_p *= (i)/float(i+1)\n running_avg_p += average_p/float(i+1)\n running_avg_p_xy *= (i)/float(i+1)\n running_avg_p_xy += average_p_xy/float(i+1)\n \n entropy_of_running_avg_p = (entropy_of_running_avg_p * (i)/float(i+1) + \n entropy(running_avg_p_xy.ravel())/float(i+1))\n \n # update reward function\n print(\"Update reward function\")\n eps = 1/np.sqrt(walker_utils.total_state_space)\n if args.cumulative:\n reward_fn = grad_ent(running_avg_p)\n else:\n reward_fn = 1.\n average_p += eps\n reward_fn /= average_p\n average_p = None # delete big array\n \n # (save for plotting)\n running_avg_entropies.append(running_avg_ent)\n running_avg_entropies_xy.append(running_avg_ent_xy)\n if i in indexes:\n running_avg_ps_xy.append(np.copy(running_avg_p_xy))\n avg_ps_xy.append(np.copy(average_p_xy))\n\n print(\"Collecting baseline experience....\")\n p_baseline, p_baseline_xy, states_visited_baseline, states_visited_xy_baseline = sac.test_agent_random(T, normalization_factors=normalization_factors, n=args.n)\n \n plotting.states_visited_over_time(states_visited, states_visited_baseline, i)\n plotting.states_visited_over_time(states_visited_xy, states_visited_xy_baseline, i, ext='_xy')\n \n # save for cumulative plot.\n if i in states_visited_indexes:\n # average over a whole bunch of rollouts\n # slow: so only do this when needed.\n print(\"Averaging unique xy states visited....\")\n states_visited_xy = compute_states_visited_xy(env, policies, norm=normalization_factors, T=T, n=args.n, N=args.avg_N)\n states_visited_xy_baseline = compute_states_visited_xy(env, policies, norm=normalization_factors, T=T, n=args.n, N=args.avg_N, \n initial_state=initial_state, \n baseline=True)\n states_visited_cumulative.append(states_visited_xy)\n states_visited_cumulative_baseline.append(states_visited_xy_baseline)\n\n print(\"Compute baseline entropy....\")\n round_entropy_baseline = entropy(p_baseline.ravel())\n round_entropy_baseline_xy = entropy(p_baseline_xy.ravel())\n\n # Update baseline running averages.\n 
print(\"Updating baseline running averages...\")\n running_avg_ent_baseline = running_avg_ent_baseline * (i)/float(i+1) + round_entropy_baseline/float(i+1)\n running_avg_ent_baseline_xy = running_avg_ent_baseline_xy * (i)/float(i+1) + round_entropy_baseline_xy/float(i+1)\n\n running_avg_p_baseline *= (i)/float(i+1) \n running_avg_p_baseline += p_baseline/float(i+1)\n running_avg_p_baseline_xy *= (i)/float(i+1) \n running_avg_p_baseline_xy += p_baseline_xy/float(i+1)\n entropy_of_running_avg_p_baseline = (entropy_of_running_avg_p_baseline * (i)/float(i+1) +\n entropy(running_avg_p_baseline_xy.ravel())/float(i+1))\n \n p_baseline = None\n \n # (save for plotting)\n running_avg_cumul_entropies.append(entropy_of_running_avg_p)\n running_avg_cumul_entropies_baseline.append(entropy_of_running_avg_p_baseline)\n \n running_avg_entropies_baseline.append(running_avg_ent_baseline)\n running_avg_entropies_baseline_xy.append(running_avg_ent_baseline_xy)\n if i in indexes:\n running_avg_ps_baseline_xy.append(np.copy(running_avg_p_baseline_xy))\n avg_ps_baseline_xy.append(np.copy(p_baseline_xy))\n \n utils.log_statement(average_p_xy)\n utils.log_statement(p_baseline_xy)\n \n # Calculate percent of state space visited.\n pct = np.count_nonzero(running_avg_p)/float(running_avg_p.size)\n pct_visited.append(pct)\n pct_xy = np.count_nonzero(running_avg_p_xy)/float(running_avg_p_xy.size)\n pct_visited_xy.append(pct_xy)\n \n pct_baseline = np.count_nonzero(running_avg_p_baseline)/float(running_avg_p_baseline.size)\n pct_visited_baseline.append(pct_baseline)\n pct_xy_baseline = np.count_nonzero(running_avg_p_baseline_xy)/float(running_avg_p_baseline_xy.size)\n pct_visited_xy_baseline.append(pct_xy_baseline)\n \n # Print round summary.\n col_headers = [\"\", \"baseline\", \"maxEnt\"]\n col1 = [\"round_entropy_xy\", \n \"running_avg_ent_xy\", \n \"round_entropy\", \n \"running_avg_ent\", \n \"% state space xy\", \n \"% total state space\"]\n col2 = [round_entropy_baseline_xy, running_avg_ent_baseline_xy, \n round_entropy_baseline, running_avg_ent_baseline, \n pct_xy_baseline, pct_baseline]\n col3 = [round_entropy_xy, running_avg_ent_xy, \n round_entropy, running_avg_ent, \n pct_xy, pct]\n table = tabulate(np.transpose([col1, col2, col3]), col_headers, tablefmt=\"fancy_grid\", floatfmt=\".4f\")\n utils.log_statement(table)\n \n # Plot from round.\n plotting.heatmap(running_avg_p_xy, average_p_xy, i)\n plotting.heatmap1(running_avg_p_baseline_xy, i)\n \n if i == states_visited_indexes[3]:\n plotting.states_visited_over_time_multi(states_visited_cumulative, \n states_visited_cumulative_baseline, \n states_visited_indexes)\n \n # save final expert weights to use with the trained oracles.\n weights_file = experiment_directory + '/policy_weights'\n np.save(weights_file, weights)\n \n # cumulative plots.\n plotting.running_average_entropy(running_avg_entropies, running_avg_entropies_baseline)\n plotting.running_average_entropy(running_avg_entropies_xy, running_avg_entropies_baseline_xy, ext='_xy')\n plotting.running_average_entropy(running_avg_cumul_entropies, running_avg_cumul_entropies_baseline, ext='_cumulative_xy') \n \n plotting.heatmap4(running_avg_ps_xy, running_avg_ps_baseline_xy, indexes, ext=\"cumulative\")\n plotting.heatmap4(avg_ps_xy, avg_ps_baseline_xy, indexes, ext=\"epoch\")\n \n plotting.percent_state_space_reached(pct_visited, pct_visited_baseline, ext='_total')\n plotting.percent_state_space_reached(pct_visited_xy, pct_visited_xy_baseline, ext=\"_xy\")\n \n return policies\n\ndef main():\n\n # Suppress 
scientific notation.\n np.set_printoptions(suppress=True, edgeitems=100, linewidth=150, precision=8)\n\n # Make environment.\n env = gym.make(args.env)\n env.seed(int(time.time())) # seed environment\n\n TIME = datetime.now().strftime('%Y_%m_%d-%H-%M')\n plotting.FIG_DIR = 'figs/'\n plotting.model_time = args.exp_name + '/'\n if not os.path.exists(plotting.FIG_DIR+plotting.model_time):\n os.makedirs(plotting.FIG_DIR+plotting.model_time)\n\n policies = collect_entropy_policies(env, args.epochs, args.T)\n env.close()\n\n print(\"*** ---------- ***\")\n print(\"DONE\")\n\nif __name__ == \"__main__\":\n main()\n\n\n", "sub_path": "walker/walker_collect_sac.py", "file_name": "walker_collect_sac.py", "file_ext": "py", "file_size_in_byte": 20403, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 8, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "utils.get_args", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 38, "usage_type": "call"}, {"api_name": "walker_utils.num_states_2d", "line_number": 38, "usage_type": "attribute"}, {"api_name": "walker_utils.qpos", "line_number": 46, "usage_type": "attribute"}, {"api_name": "walker_utils.qpos", "line_number": 47, "usage_type": "attribute"}, {"api_name": "walker_utils.get_state", "line_number": 49, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 52, "usage_type": "call"}, {"api_name": "walker_utils.action_dim", "line_number": 52, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 53, "usage_type": "call"}, {"api_name": "walker_utils.get_state", "line_number": 62, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state_2d", "line_number": 65, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state_2d", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 86, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 102, "usage_type": "attribute"}, {"api_name": "walker_utils.get_state", "line_number": 117, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state", "line_number": 120, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state_2d", "line_number": 123, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state", "line_number": 127, "usage_type": "call"}, {"api_name": "walker_utils.discretize_state_2d", "line_number": 128, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 138, "usage_type": "call"}, {"api_name": "gym.wrappers.Monitor", "line_number": 139, "usage_type": "call"}, {"api_name": "gym.wrappers", "line_number": 139, "usage_type": "name"}, {"api_name": "walker_utils.qpos", "line_number": 142, "usage_type": "attribute"}, {"api_name": "walker_utils.qpos", 
"line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 159, "usage_type": "call"}, {"api_name": "walker_utils.num_states", "line_number": 159, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 160, "usage_type": "call"}, {"api_name": "walker_utils.num_states_2d", "line_number": 160, "usage_type": "attribute"}, {"api_name": "gym.wrappers.Monitor", "line_number": 185, "usage_type": "call"}, {"api_name": "gym.wrappers", "line_number": 185, "usage_type": "name"}, {"api_name": "walker_utils.qpos", "line_number": 187, "usage_type": "attribute"}, {"api_name": "walker_utils.qpos", "line_number": 188, "usage_type": "attribute"}, {"api_name": "walker_utils.get_state", "line_number": 190, "usage_type": "call"}, {"api_name": "walker_utils.get_state", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 214, "usage_type": "call"}, {"api_name": "walker_utils.total_state_space", "line_number": 214, "usage_type": "attribute"}, {"api_name": "walker_utils.qpos", "line_number": 218, "usage_type": "attribute"}, {"api_name": "walker_utils.qvel", "line_number": 218, "usage_type": "attribute"}, {"api_name": "utils.log_statement", "line_number": 223, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 229, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 242, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 246, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 248, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 250, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 259, "usage_type": "call"}, {"api_name": "walker_utils.num_states", "line_number": 259, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 260, "usage_type": "call"}, {"api_name": "walker_utils.num_states_2d", "line_number": 260, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 265, "usage_type": "call"}, {"api_name": "walker_utils.num_states", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 266, "usage_type": "call"}, {"api_name": "walker_utils.num_states_2d", "line_number": 266, "usage_type": "attribute"}, {"api_name": "experience_buffer.ExperienceBuffer", "line_number": 293, "usage_type": "call"}, {"api_name": "walker_utils.get_state", "line_number": 298, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 305, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 310, "usage_type": "call"}, {"api_name": "walker_utils.num_states", "line_number": 310, "usage_type": "attribute"}, {"api_name": "utils.log_statement", "line_number": 313, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 319, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 320, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 320, "usage_type": "call"}, {"api_name": "spinup.utils.run_utils.setup_logger_kwargs", "line_number": 322, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 326, "usage_type": "call"}, {"api_name": "walker_soft_actor_critic.WalkerSoftActorCritic", "line_number": 327, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 327, "usage_type": "call"}, {"api_name": 
"utils.get_weights", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 379, "usage_type": "call"}, {"api_name": "walker_utils.total_state_space", "line_number": 379, "usage_type": "attribute"}, {"api_name": "numpy.copy", "line_number": 392, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 393, "usage_type": "call"}, {"api_name": "plotting.states_visited_over_time", "line_number": 398, "usage_type": "call"}, {"api_name": "plotting.states_visited_over_time", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 438, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 439, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 441, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 442, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 445, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 447, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 450, "usage_type": "call"}, {"api_name": "numpy.count_nonzero", "line_number": 452, "usage_type": "call"}, {"api_name": "tabulate.tabulate", "line_number": 469, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 469, "usage_type": "call"}, {"api_name": "utils.log_statement", "line_number": 470, "usage_type": "call"}, {"api_name": "plotting.heatmap", "line_number": 473, "usage_type": "call"}, {"api_name": "plotting.heatmap1", "line_number": 474, "usage_type": "call"}, {"api_name": "plotting.states_visited_over_time_multi", "line_number": 477, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 483, "usage_type": "call"}, {"api_name": "plotting.running_average_entropy", "line_number": 486, "usage_type": "call"}, {"api_name": "plotting.running_average_entropy", "line_number": 487, "usage_type": "call"}, {"api_name": "plotting.running_average_entropy", "line_number": 488, "usage_type": "call"}, {"api_name": "plotting.heatmap4", "line_number": 490, "usage_type": "call"}, {"api_name": "plotting.heatmap4", "line_number": 491, "usage_type": "call"}, {"api_name": "plotting.percent_state_space_reached", "line_number": 493, "usage_type": "call"}, {"api_name": "plotting.percent_state_space_reached", "line_number": 494, "usage_type": "call"}, {"api_name": "numpy.set_printoptions", "line_number": 501, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 504, "usage_type": "call"}, {"api_name": "time.time", "line_number": 505, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 507, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 507, "usage_type": "name"}, {"api_name": "plotting.FIG_DIR", "line_number": 508, "usage_type": "attribute"}, {"api_name": "plotting.model_time", "line_number": 509, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 510, "usage_type": "call"}, {"api_name": "os.path", "line_number": 510, "usage_type": "attribute"}, {"api_name": "plotting.FIG_DIR", "line_number": 510, "usage_type": "attribute"}, {"api_name": "plotting.model_time", "line_number": 510, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 511, "usage_type": "call"}, {"api_name": "plotting.FIG_DIR", "line_number": 511, "usage_type": "attribute"}, {"api_name": "plotting.model_time", "line_number": 511, "usage_type": "attribute"}]} +{"seq_id": "603764127", "text": "import logging\nfrom abc import abstractmethod, ABC\nfrom typing import 
Iterable\ntry:\n    from ruamel_yaml import YAML\nexcept ModuleNotFoundError:\n    from ruamel.yaml import YAML\n\nfrom mspypeline.helpers import get_logger, DataDict\n\n\nclass BaseReader(ABC):\n    def __init__(self, start_dir: str, reader_config: dict, loglevel=logging.DEBUG):\n        self.full_data = DataDict(data_source=self)\n        self.start_dir = start_dir\n        self.reader_config = reader_config\n        self.logger = get_logger(self.__class__.__name__, loglevel)\n\n        # log which files will be read\n        self.logger.info(\"Required files: %s\", self.required_files)\n\n        if not reader_config:\n            self.logger.warning(\"Empty configs\")\n        else:\n            self.logger.debug(\"Got configs: %s\", self.reader_config)\n        if start_dir is None:\n            raise ValueError(\"Invalid starting dir\")\n\n    @property\n    @classmethod\n    @abstractmethod\n    def name(cls) -> str:\n        raise NotImplementedError\n\n    @property\n    @classmethod\n    @abstractmethod\n    def required_files(cls) -> Iterable[str]:\n        raise NotImplementedError\n\n    @property\n    @classmethod\n    @abstractmethod\n    def plotter(cls): # -> Type[BasePlotter]\n        raise NotImplementedError\n\n\nclass MissingFilesException(Exception):\n    pass\n\n\nif __name__ == \"__main__\":\n    # minimal example of a class implementing the BaseReader\n    class Reader(BaseReader):\n        name = \"reader\" # this is the name of the reader in the yaml file\n        required_files = [] # this is a list of strings of all files that should be parsed\n\n        def __init__(self, start_dir, reader_config, loglevel):\n            super().__init__(start_dir, reader_config, loglevel)\n            for file in Reader.required_files:\n                self.full_data[file] = [0, 0, 10] # this should be the data from the file\n\n\n    r = Reader(\"\", {}, 10)\n", "sub_path": "mspypeline/file_reader/BaseReader.py", "file_name": "BaseReader.py", "file_ext": "py", "file_size_in_byte": 1904, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "abc.ABC", "line_number": 12, "usage_type": "name"}, {"api_name": "logging.DEBUG", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mspypeline.helpers.DataDict", "line_number": 14, "usage_type": "call"}, {"api_name": "mspypeline.helpers.get_logger", "line_number": 17, "usage_type": "call"}, {"api_name": "abc.abstractmethod", "line_number": 31, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 37, "usage_type": "name"}, {"api_name": "typing.Iterable", "line_number": 38, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "217811458", "text": "import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport psycopg2\nfrom sklearn.model_selection import train_test_split\nimport pydotplus\nfrom sklearn import tree\nfrom pylab import *\nmpl.rcParams['font.sans-serif'] = ['SimHei']\n# Show all columns when printing DataFrames\npd.set_option('display.max_columns', None)\n# Connect to the database\nconn = psycopg2.connect(database='dbgzt', user='postgres', password='admin', host='127.0.0.1', port='5432')\n# Create a cursor to access the database\ncur = conn.cursor()\n# Set the database table name and data file path\ntb_user_10_11='user_10_11_2'\ndata_path=\"/data/counsu10.csv\"\n\ndef get_consu10_fromdata(div1,div2,div3,div4):\n    cur.execute(\"SELECT * FROM \"+tb_user_10_11)\n    rows_return = cur.fetchall()\n    rows_return_df=pd.DataFrame(rows_return,columns=['user_id','counton','summoney','delconsum','leave','recharge'])\n    # Discretize the data\n    rows_return_df['counton_div']=pd.qcut(rows_return_df.loc[:,'counton'],div1,labels=list(range(div1)))\n    rows_return_df['summoney_div'] = pd.qcut(rows_return_df.loc[:, 'summoney'], div2,labels=list(range(div2)))\n    rows_return_df['delconsum_div'] = pd.qcut(rows_return_df.loc[:, 'delconsum'], div3,labels=list(range(div3)))\n    rows_return_df['recharge_div'] = pd.cut(rows_return_df.loc[:, 'recharge'], div4, labels=list(range(div4)))\n    print(rows_return_df.describe())\n    print(rows_return_df.corr(method='pearson'))\n    rows_return_df.drop(['counton','summoney','delconsum','recharge'],axis=1,inplace=True)\n    print(rows_return_df.head())\n\n    return rows_return_df\n
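\n# Binning note: pd.qcut() buckets counton/summoney/delconsum into equal-\n# frequency (quantile) bins, while pd.cut() buckets recharge into equal-width\n# bins; each column becomes integer labels 0..div-1 used as ordinal features.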
\n\n\nif __name__ == '__main__':\n    # -*- coding:utf-8 -*-\n    data=get_consu10_fromdata(5,5,5,5)\n    # Filter linear variables\n\n    print(data.describe())\n\n\n    print(\"-----------\")\n    # Split into training and test sets\n    X=data.loc[:,['recharge_div','counton_div','summoney_div','delconsum_div']]\n    y=data.loc[:,'leave']\n    X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)\n    # Train the model\n    clf = tree.DecisionTreeClassifier(criterion='entropy', max_depth=10, min_samples_split=20)\n    clf.fit(X, y)\n    # Save the model\n    with open(\"gzt.dot\", 'w') as f:\n        f = tree.export_graphviz(clf,out_file=f,\n                             feature_names=['recharge_div','counton_div','summoney_div','delconsum_div'],\n                             class_names=['notleave','leave']\n                             )\n\n    pre=clf.predict(X_test)\n    correct = [1 if ((a == 1 and b == 1) or (a == 0 and b == 0)) else 0 for (a, b) in zip(pre, y_test)]\n    accuracy = (sum(correct) / len(correct))\n    print(accuracy)\n    # Evaluate the model\n    import sklearn.metrics as metrics\n    print(metrics.confusion_matrix(y_test, pre, labels=[0, 1]))\n    print(metrics.classification_report(y_test, pre))\n    conn.close()\n\n\n", "sub_path": "manager_datasource.py", "file_name": "manager_datasource.py", "file_ext": "py", "file_size_in_byte": 2711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pandas.set_option", "line_number": 11, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 13, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 23, "usage_type": "call"}, {"api_name": "pandas.qcut", "line_number": 25, "usage_type": "call"}, {"api_name": "pandas.qcut", "line_number": 26, "usage_type": "call"}, {"api_name": "pandas.qcut", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 28, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 50, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 52, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 52, "usage_type": "name"}, {"api_name": "sklearn.tree.export_graphviz", "line_number": 56, "usage_type": "call"}, {"api_name": "sklearn.tree", "line_number": 56, "usage_type": "name"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 67, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 67, "usage_type": "name"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 68, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 68, "usage_type": "name"}]} +{"seq_id": "551054547", "text": "# Copyright (c) 2020 Xcodz.\n# All Rights Reserved.\n\nfrom __future__ import annotations\n\nimport copy\nimport math\nfrom typing import NewType\n\n__version__ = \"2020.6.4\"\n__author__ = \"Xcodz\"\n\nModelType = NewType(\"ModelType\", list[tuple[tuple[int]]])\n\n\ndef flatten(x: float, y: float, z: float, scale: int, distance: int) -> tuple[float, float]:\n    \"\"\"\n    Converts 3d point to a 2d drawable point\n\n    >>> flatten(1, 2, 3, 10, 10)\n    (7.6923076923076925, 15.384615384615385)\n    \"\"\"\n    projected_x = ((x * distance) / (z + distance)) * scale\n    projected_y = ((y * distance) / (z + distance)) * scale\n    return projected_x, projected_y\n
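\n# Note: flatten() is a simple perspective projection -- each coordinate is\n# scaled by distance / (z + distance), so geometry farther along z shrinks\n# toward the origin, and scale converts the result to screen units.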
\"\"\"\n projected_x = ((x * distance) / (z + distance)) * scale\n projected_y = ((y * distance) / (z + distance)) * scale\n return projected_x, projected_y\n\n\ndef model_rotate(model, axis, angle) -> ModelType:\n \"\"\"Rotate a model\"\"\"\n d = copy.deepcopy(model)\n for x in range(len(d)):\n p1, p2 = d[x]\n n = (rotate(p1[0], p1[1], p1[2], axis, angle), rotate(p2[0], p2[1], p2[2], axis, angle))\n d[x] = n\n return d\n\n\ndef model_flatten(model, scale, distance) -> ModelType:\n \"\"\"flatten complete model\"\"\"\n d = copy.deepcopy(model)\n for x in range(len(d)):\n p1, p2 = d[x]\n n = (flatten(p1[0], p1[1], p1[2], scale, distance), flatten(p2[0], p2[1], p2[2], scale, distance))\n d[x] = n\n return d\n\n\ndef rotate(x: int, y: int, z: int, axis: str, angle: int):\n \"\"\"\n rotate a point around a certain axis with a certain angle\n angler can be any integer between 1, 360\n\n >>> rotate(1, 2, 3, 'y', 90)\n (3.130524675073759, 2, 0.4470070007889556)\n \"\"\"\n if angle > 360 or angle < 0:\n raise ValueError(\"Angle is supposed to be in between 0, 360\")\n if type(x) is not int:\n raise TypeError(\"x must be int\")\n if type(y) is not int:\n raise TypeError(\"y must be int\")\n if type(z) is not int:\n raise TypeError(\"z must be int\")\n angle = angle / 450 * 180 / math.pi\n if axis == 'z':\n newX = x * math.cos(angle) - y * math.sin(angle)\n newY = y * math.cos(angle) + x * math.sin(angle)\n newZ = z\n elif axis == 'x':\n newY = y * math.cos(angle) - z * math.sin(angle)\n newZ = z * math.cos(angle) + y * math.sin(angle)\n newX = x\n elif axis == 'y':\n newX = x * math.cos(angle) - z * math.sin(angle)\n newZ = z * math.cos(angle) + x * math.sin(angle)\n newY = y\n else:\n raise ValueError('not a valid axis')\n nx = newX\n ny = newY\n nz = newZ\n return nx, ny, nz\n\n\nclass ModelMake:\n def cube(x, y, z, s=1):\n mcube = [((x, y, z), (x + s, y, z)),\n ((x, y, z), (x, y + s, z)),\n ((x, y, z), (x, y, z + s)),\n ((x, y, z + s), (x + s, y, z + s)),\n ((x, y, z + s), (x, y + s, z + s)),\n ((x + s, y, z + s), (x + s, y, z)),\n ((x + s, y, z + s), (x + s, y + s, z + s)),\n ((x + s, y, z), (x + s, y + s, z)),\n ((x, y + s, z + s), (x + s, y + s, z + s)),\n ((x + s, y + s, z + s), (x + s, y + s, z)),\n ((x, y + s, z + s), (x, y + s, z)),\n ((x, y + s, z), (x + s, y + s, z))]\n return mcube\n\n\ndef model_dump_to_file(model_file, model):\n with open(model_file, 'w') as f:\n for segment in model:\n coord1, coord2 = segment\n f.write( \"{} {} {}:{} {} {}\\n\".format(*coord1, *coord2))\n\n\ndef model_load_from_file(model_file):\n f = open(model_file).readlines()\n model = []\n for x in f:\n p1s, p2s = x.split(':', 1)\n p11, p12, p13 = p1s.split(' ', 2)\n p21, p22, p23 = p2s.split(' ', 2)\n p11, p12, p13 = float(p11), float(p12), float(p13)\n p21, p22, p23 = float(p21), float(p22), float(p23)\n\n n = ((p11, p12, p13), (p21, p22, p23))\n model.append(n)\n return model\n\n\ndef main():\n import pygame, sys\n sys.argv.append(\"rectangle.txt\")\n model_dump_to_file(\"rectangle.txt\", ModelMake.cube(-1, -1, -1, 2))\n pygame.init()\n fpsclock = pygame.time.Clock()\n disp = pygame.display.set_mode((600, 400))\n cube3 = model_load_from_file(sys.argv[1])\n rotate = False\n rotate2 = False\n scale = 1\n distance = 10\n r = 1\n r2 = 1\n a = 1\n iskd = False\n gevent = None\n while True:\n if rotate:\n cube3 = model_rotate(cube3, \"y\", r * a)\n rotate = False\n if rotate2:\n cube3 = model_rotate(cube3, \"x\", r2 * a)\n rotate2 = False\n cube2 = model_flatten(cube3, scale, distance)\n for event in 
pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n raise SystemExit(0)\n if event.type == pygame.KEYUP:\n iskd = False\n if event.type == pygame.KEYDOWN or iskd:\n iskd = True\n gevent = event\n if iskd and gevent != None:\n if gevent.type == pygame.KEYDOWN:\n event = gevent\n if event.key == pygame.K_UP or event.key == pygame.K_DOWN:\n rotate2 = True\n if event.key != pygame.K_UP:\n r2 = 0.2\n else:\n r2 = -0.2\n elif event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\n rotate = True\n if event.key != pygame.K_LEFT:\n r = 0.2\n else:\n r = -0.2\n elif event.key == pygame.K_a:\n scale -= 1\n elif event.key == pygame.K_d:\n scale += 1\n elif event.key == pygame.K_w:\n distance -= 1\n elif event.key == pygame.K_s:\n distance += 1\n elif event.key == pygame.K_z:\n cube3 = model_rotate(cube3, \"z\", 0.2 * a)\n elif event.key == pygame.K_x:\n cube3 = model_rotate(cube3, \"z\", -0.2 * a)\n elif event.key == pygame.K_q:\n a -= 1\n elif event.key == pygame.K_e:\n a += 1\n disp.fill((255, 255, 255))\n\n if distance == 0:\n distance = 1\n for x, y in cube2:\n pygame.draw.aaline(disp, (0, 0, 0), (x[0] + 300, x[1] + 200), (y[0] + 300, y[1] + 200))\n\n pygame.display.update()\n fpsclock.tick(10)\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "denver/graphics3d.py", "file_name": "graphics3d.py", "file_ext": "py", "file_size_in_byte": 6414, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.NewType", "line_number": 13, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 30, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 40, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 64, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 66, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 66, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 67, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 67, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 70, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 70, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 71, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 71, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 74, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 74, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 75, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 75, "usage_type": "call"}, {"api_name": "sys.argv.append", "line_number": 126, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 128, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 129, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 129, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 130, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 130, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 149, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 150, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 151, "usage_type": "call"}, {"api_name": "pygame.KEYUP", "line_number": 153, 
"usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 155, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 159, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 161, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 163, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 167, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 169, "usage_type": "attribute"}, {"api_name": "pygame.K_a", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pygame.K_d", "line_number": 175, "usage_type": "attribute"}, {"api_name": "pygame.K_w", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.K_s", "line_number": 179, "usage_type": "attribute"}, {"api_name": "pygame.K_z", "line_number": 181, "usage_type": "attribute"}, {"api_name": "pygame.K_x", "line_number": 183, "usage_type": "attribute"}, {"api_name": "pygame.K_q", "line_number": 185, "usage_type": "attribute"}, {"api_name": "pygame.K_e", "line_number": 187, "usage_type": "attribute"}, {"api_name": "pygame.draw.aaline", "line_number": 194, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 196, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 196, "usage_type": "attribute"}]} +{"seq_id": "625263656", "text": "#!/usr/bin/env python\n\n\"\"\"\n================================================\nPyrad: The MeteoSwiss Radar Processing framework\n================================================\n\nWelcome to Pyrad!\n\nTo run the processing framework type:\n python main_process_data.py \\\n[config_file] [process_start_time] [process_end_time]\nExample:\n python main_process_data.py \\\n'/home/lom/users/fvj/pyrad/config/processing/paradiso_fvj_vol.txt' \\\n'20140523000000' '20140523001000'\n\n\"\"\"\n\n# Author: fvj\n# License: BSD 3 clause\n\nimport datetime\nimport argparse\nimport os\n\nimport pyart\nimport pyrad\n\nprint(__doc__)\n\n\nif __name__ == '__main__':\n\n # parse the arguments\n parser = argparse.ArgumentParser(\n description='Entry to Pyrad processing framework')\n\n # positional arguments\n parser.add_argument(\n 'proccfgfile', type=str, help='path to main configuration file')\n parser.add_argument(\n 'starttime', type=str,\n help='starting time of the data to be processed')\n parser.add_argument(\n 'endtime', type=str, help='end time of the data to be processed ')\n\n args = parser.parse_args()\n\n print('config file: '+args.proccfgfile)\n print('start time: '+args.starttime)\n print('end time: '+args.endtime)\n\n proc_starttime = datetime.datetime.strptime(\n args.starttime, '%Y%m%d%H%M%S')\n proc_endtime = datetime.datetime.strptime(args.endtime, '%Y%m%d%H%M%S')\n\n # create config dictionary\n cfg = dict({'configFile': args.proccfgfile})\n cfg = pyrad.io.read_config(cfg['configFile'], cfg=cfg)\n cfg = pyrad.io.read_config(cfg['locationConfigFile'], cfg=cfg)\n cfg = pyrad.io.read_config(cfg['productConfigFile'], cfg=cfg)\n\n # configuration dictionary to figure out where the data is\n datacfg = dict({'datapath': cfg['datapath']})\n datacfg.update({'ScanList': cfg['ScanList']})\n if 'cosmopath' in cfg:\n datacfg.update({'cosmopath': cfg['cosmopath']})\n else:\n datacfg.update({'cosmopath': None})\n if 'dempath' in 
cfg:\n datacfg.update({'dempath': cfg['dempath']})\n else:\n datacfg.update({'dempath': None})\n if 'loadbasepath' in cfg:\n datacfg.update({'loadbasepath': cfg['loadbasepath']})\n else:\n datacfg.update({'loadbasepath': None})\n if 'loadname' in cfg:\n datacfg.update({'loadname': cfg['loadname']})\n else:\n datacfg.update({'loadname': None})\n if 'RadarName' in cfg:\n datacfg.update({'RadarName': cfg['RadarName']})\n else:\n datacfg.update({'RadarName': None})\n if 'RadarRes' in cfg:\n datacfg.update({'RadarRes': cfg['RadarRes']})\n else:\n datacfg.update({'RadarRes': None})\n if 'ScanPeriod' in cfg:\n datacfg.update({'ScanPeriod': int(cfg['ScanPeriod'])})\n else:\n print(\n 'WARNING: Scan period not specified. \\\n Assumed default value 5 min')\n datacfg.update({'ScanPeriod': 5})\n if 'CosmoRunFreq' in cfg:\n datacfg.update({'CosmoRunFreq': int(cfg['CosmoRunFreq'])})\n else:\n print(\n 'WARNING: COSMO run frequency not specified. \\\n Assumed default value 3h')\n datacfg.update({'CosmoRunFreq': 3})\n if 'CosmoForecasted' in cfg:\n datacfg.update({'CosmoForecasted': int(cfg['CosmoForecasted'])})\n else:\n print(\n 'WARNING: Hours forecasted by COSMO not specified. \\\n Assumed default value 7h (including analysis)')\n datacfg.update({'CosmoForecasted': 7})\n\n # get unique initial data types list\n datatypesdescr = set()\n\n for datasetdescr in cfg['dataSetList']:\n proclevel, dataset = pyrad.io.get_datasetfields(datasetdescr)\n if isinstance(cfg[dataset]['datatype'], str):\n datagroup, datatype, dataset_save, product_save = (\n pyrad.io.get_datatypefields(cfg[dataset]['datatype']))\n if datagroup != 'PROC':\n datatypesdescr.add(cfg[dataset]['datatype'])\n else:\n for datatype in cfg[dataset]['datatype']:\n datagroup, datatype_aux, dataset_save, product_save = (\n pyrad.io.get_datatypefields(datatype))\n if datagroup != 'PROC':\n datatypesdescr.add(datatype)\n\n datatypesdescr = list(datatypesdescr)\n\n # get number of processing levels and datasets corresponding to\n # each processing level\n dataset_levels = dict({'l0': list()})\n for datasetdescr in cfg['dataSetList']:\n proclevel, dataset = pyrad.io.get_datasetfields(datasetdescr)\n if proclevel in dataset_levels:\n dataset_levels[proclevel].append(dataset)\n else:\n dataset_levels.update({proclevel: [dataset]})\n\n # get lists of files to process using as reference a master scan\n for datatypedescr in datatypesdescr:\n datagroup, datatype, dataset, product = pyrad.io.get_datatypefields(\n datatypedescr)\n if (datagroup != 'COSMO') and (datagroup != 'RAD4ALPCOSMO'):\n masterdatatypedescr = datatypedescr\n masterscan = cfg['ScanList'][0]\n break\n\n masterfilelist = pyrad.io.get_file_list(\n masterscan, masterdatatypedescr, proc_starttime, proc_endtime,\n datacfg)\n nvolumes = len(masterfilelist)\n if nvolumes == 0:\n raise Exception(\n \"ERROR: Could not find any volume within the specified times\")\n print('Number of volumes to process: '+str(nvolumes)+'\\n\\n')\n\n # initial processing of the datasets\n print('\\n\\nInitializing datasets:')\n for level in sorted(dataset_levels):\n print('\\nProcess level: '+level)\n for dataset in dataset_levels[level]:\n print('Processing dataset: '+dataset)\n\n dscfg = cfg[dataset]\n if 'MAKE_GLOBAL' not in dscfg:\n dscfg.update({'MAKE_GLOBAL': 0})\n\n proc_func_name, dsformat = pyrad.proc.get_process_type(\n dscfg['type'])\n proc_func = getattr(pyrad.proc, proc_func_name)\n new_dataset = proc_func(0, dscfg, radar=None)\n\n if new_dataset is not None:\n if dscfg['MAKE_GLOBAL'] == 1:\n for field in 
new_dataset.fields:\n print('Adding field: '+field)\n radar.add_field(\n field, new_dataset.fields[field],\n replace_existing=True)\n\n # create the data set products\n if 'products' in cfg[dataset]:\n for product in cfg[dataset]['products']:\n prdcfg = cfg[dataset]['products'][product]\n prdcfg.update({'procname': cfg['name']})\n prdcfg.update({'basepath': cfg['saveimgbasepath']})\n prdcfg.update({'imgformat': cfg['imgformat']})\n prdcfg.update({'convertformat': cfg['convertformat']})\n prdcfg.update(\n {'ppiImageConfig': cfg['ppiImageConfig']})\n prdcfg.update(\n {'rhiImageConfig': cfg['rhiImageConfig']})\n prdcfg.update({'dsname': dataset})\n prdcfg.update({'dstype': cfg[dataset]['type']})\n prdcfg.update({'prdname': product})\n prdcfg.update({'timeinfo': voltime})\n\n prod_func_name = pyrad.proc.get_product_type(dsformat)\n prod_func = getattr(pyrad.proc, prod_func_name)\n result = prod_func(new_dataset, prdcfg)\n\n # process all data files in file list\n for masterfile in masterfilelist:\n print('\\n\\nmaster file: '+os.path.basename(masterfile))\n voltime = pyrad.io.get_datetime(masterfile, masterdatatypedescr)\n\n # get all raw data\n radar = pyrad.io.get_data(voltime, datatypesdescr, datacfg)\n\n # process all data sets\n for level in sorted(dataset_levels):\n print('\\nProcess level: '+level)\n for dataset in dataset_levels[level]:\n print('Processing dataset: '+dataset)\n\n dscfg = cfg[dataset]\n if 'MAKE_GLOBAL' not in dscfg:\n dscfg.update({'MAKE_GLOBAL': 0})\n\n proc_func_name, dsformat = pyrad.proc.get_process_type(\n dscfg['type'])\n proc_func = getattr(pyrad.proc, proc_func_name)\n new_dataset = proc_func(1, dscfg, radar=radar)\n\n if new_dataset is not None:\n if dscfg['MAKE_GLOBAL'] == 1:\n for field in new_dataset.fields:\n print('Adding field: '+field)\n radar.add_field(\n field, new_dataset.fields[field],\n replace_existing=True)\n\n # create the data set products\n if 'products' in cfg[dataset]:\n for product in cfg[dataset]['products']:\n prdcfg = cfg[dataset]['products'][product]\n prdcfg.update({'procname': cfg['name']})\n prdcfg.update(\n {'basepath': cfg['saveimgbasepath']})\n prdcfg.update({'imgformat': cfg['imgformat']})\n prdcfg.update(\n {'convertformat': cfg['convertformat']})\n prdcfg.update(\n {'ppiImageConfig': cfg['ppiImageConfig']})\n prdcfg.update(\n {'rhiImageConfig': cfg['rhiImageConfig']})\n prdcfg.update({'dsname': dataset})\n prdcfg.update({'dstype': cfg[dataset]['type']})\n prdcfg.update({'prdname': product})\n prdcfg.update({'timeinfo': voltime})\n\n prod_func_name = pyrad.proc.get_product_type(\n dsformat)\n prod_func = getattr(pyrad.proc, prod_func_name)\n result = prod_func(new_dataset, prdcfg)\n\n # post-processing of the datasets\n print('\\n\\nPost-processing datasets')\n for level in sorted(dataset_levels):\n print('\\nProcess level: '+level)\n for dataset in dataset_levels[level]:\n print('Processing dataset: '+dataset)\n\n dscfg = cfg[dataset]\n if 'MAKE_GLOBAL' not in dscfg:\n dscfg.update({'MAKE_GLOBAL': 0})\n\n proc_func_name, dsformat = pyrad.proc.get_process_type(\n dscfg['type'])\n proc_func = getattr(pyrad.proc, proc_func_name)\n new_dataset = proc_func(2, dscfg, radar=None)\n\n if new_dataset is not None:\n if dscfg['MAKE_GLOBAL'] == 1:\n for field in new_dataset.fields:\n print('Adding field: '+field)\n radar.add_field(\n field, new_dataset.fields[field],\n replace_existing=True)\n\n # create the data set products\n if 'products' in cfg[dataset]:\n for product in cfg[dataset]['products']:\n prdcfg = 
cfg[dataset]['products'][product]\n prdcfg.update({'procname': cfg['name']})\n prdcfg.update({'basepath': cfg['saveimgbasepath']})\n prdcfg.update({'imgformat': cfg['imgformat']})\n prdcfg.update({'convertformat': cfg['convertformat']})\n prdcfg.update(\n {'ppiImageConfig': cfg['ppiImageConfig']})\n prdcfg.update(\n {'rhiImageConfig': cfg['rhiImageConfig']})\n prdcfg.update({'dsname': dataset})\n prdcfg.update({'dstype': cfg[dataset]['type']})\n prdcfg.update({'prdname': product})\n prdcfg.update({'timeinfo': voltime})\n\n prod_func_name = pyrad.proc.get_product_type(dsformat)\n prod_func = getattr(pyrad.proc, prod_func_name)\n result = prod_func(new_dataset, prdcfg)\n\n print('\\n\\n\\nThis is the end my friend! See you soon!')\n", "sub_path": "src/pyrad_proc/scripts/main_process_data.py", "file_name": "main_process_data.py", "file_ext": "py", "file_size_in_byte": 12517, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "pyrad.io.read_config", "line_number": 60, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pyrad.io.read_config", "line_number": 61, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 61, "usage_type": "attribute"}, {"api_name": "pyrad.io.read_config", "line_number": 62, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datasetfields", "line_number": 117, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 117, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datatypefields", "line_number": 120, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datatypefields", "line_number": 126, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 126, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datasetfields", "line_number": 136, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 136, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datatypefields", "line_number": 144, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_file_list", "line_number": 151, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 151, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_process_type", "line_number": 171, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 171, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 173, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_product_type", "line_number": 201, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 201, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 202, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_datetime", "line_number": 208, "usage_type": "call"}, {"api_name": "pyrad.io", 
"line_number": 208, "usage_type": "attribute"}, {"api_name": "pyrad.io.get_data", "line_number": 211, "usage_type": "call"}, {"api_name": "pyrad.io", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_process_type", "line_number": 223, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 225, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_product_type", "line_number": 255, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 255, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 257, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_process_type", "line_number": 271, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 273, "usage_type": "attribute"}, {"api_name": "pyrad.proc.get_product_type", "line_number": 301, "usage_type": "call"}, {"api_name": "pyrad.proc", "line_number": 301, "usage_type": "attribute"}, {"api_name": "pyrad.proc", "line_number": 302, "usage_type": "attribute"}]} +{"seq_id": "30991044", "text": "from selenium import webdriver\nimport random\nimport subprocess\nimport time\nimport csv\nimport os\n\n##### Folder and File Settings #####\nos.chdir(r'/path/to/working/directory')\nexported_file = 'exported_file.csv'\n\n##### Headless Browser Settings #####\noptions = webdriver.ChromeOptions()\noptions.add_argument('headless')\npath_to_chromedriver = r'/path/to/chromedriver' \ndriver = webdriver.Chrome(executable_path = path_to_chromedriver,chrome_options = options)\n\n###### Variables ######\nurls = ['www.example.com/page1','www.example.com/page2','www.example.com/page3']\nclassName = 'name-of-class-or-id-of-element'\nnew_dataset = []\n\n###### Scraper ######\nfor url in urls:\n\tscraped_data = []\n\tprint(\"Scraping %d out of %d urls\" % (urls.index(url) +1, len(urls))) \n\tdriver.get(url)\n\ttime.sleep(random.randint(10, 20)) # Scraper buffer\n\telems = driver.find_elements_by_class_name(className)\n\tif not elems:\n\t\tcontinue\n\tif elems: \n\t\tfor elem in elems:\n\t\t\tscraped_data.append(elem.text.encode('utf-8'))\n\t\t\tnew_dataset.append(scraped_data)\ndriver.quit()\n\n###### Export data to .csv #####\nwith open(exported_file,'w', newline='') as csv_file:\n\twriter = csv.writer(csv_file)\n\twriter.writerows(new_dataset)\nprint(\"Writing Complete\")\nsubprocess.call(r'/path/to/exported_file', shell=True)", "sub_path": "windows/vanilla/scraper.py", "file_name": "scraper.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "selenium.webdriver.ChromeOptions", "line_number": 13, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 13, "usage_type": "name"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 16, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 16, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 28, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 28, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 40, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "203381809", "text": "import inspect\nimport os\nimport os.path\nimport re\nimport tempfile\n\nimport fabric\nfrom 
fabric import api as fabric_api\nfrom fabric import colors\n\nfrom umd import api\nfrom umd import config\nfrom umd import exception\nfrom umd import system\n\n\ndef to_list(obj):\n if not isinstance(obj, (str, list, tuple)):\n raise exception.ConfigException(\"obj variable type '%s' not supported.\"\n % type(obj))\n elif isinstance(obj, (str, tuple)):\n return [obj]\n return obj\n\n\ndef to_file(r, logfile):\n \"\"\"Writes Fabric capture result to the given file.\"\"\"\n def _write(fname, msg):\n dirname = os.path.dirname(fname)\n if not os.path.exists(dirname):\n os.makedirs(dirname)\n api.info(\"Log directory '%s' has been created.\" % dirname)\n with open(fname, 'a') as f:\n f.write(msg)\n f.flush()\n\n l = []\n try:\n if r.stdout:\n _fname = '.'.join([logfile, \"stdout\"])\n _write(_fname, r.stdout)\n l.append(_fname)\n if r.stderr:\n _fname = '.'.join([logfile, \"stderr\"])\n _write(_fname, r.stderr)\n l.append(_fname)\n except AttributeError:\n if isinstance(r, str):\n _fname = '.'.join([logfile, \"stdout\"])\n _write(_fname, r)\n l.append(_fname)\n return l\n\n\ndef format_error_msg(logs, cmd=None):\n msg_l = []\n if logs:\n msg_l.append(\"See more information in logs (%s).\" % ','.join(logs))\n if cmd:\n msg_l.append(\"Error while executing command '%s'.\" % cmd)\n\n return ' '.join(msg_l)\n\n\ndef runcmd(cmd,\n chdir=None,\n fail_check=True,\n stop_on_error=False,\n logfile=None,\n get_error_msg=False,\n stderr_to_stdout=False):\n \"\"\"Runs a generic command.\n\n cmd: command to execute.\n chdir: local directory to run the command from.\n fail_check: boolean that indicates if the workflow must be\n interrupted in case of failure.\n stop_on_error: whether abort or not in case of failure.\n logfile: file to log the command execution.\n get_error_msg: return the formatted error message.\n stderr_to_stdout: redirect standard error to standard output.\n \"\"\"\n def _run():\n with fabric_api.settings(warn_only=True):\n with fabric_api.shell_env(LC_ALL=\"en_US.UTF-8\",\n LANG=\"en_US.UTF-8\"):\n r = fabric_api.local(cmd, capture=True)\n return r\n\n if stderr_to_stdout:\n cmd = ' '.join([cmd, \"2>&1\"])\n\n if chdir:\n with fabric.context_managers.lcd(chdir):\n r = _run()\n else:\n r = _run()\n\n logs = []\n if logfile:\n logs = to_file(r, logfile)\n if logs:\n r.logfile = logs\n\n if fail_check and r.failed:\n msg = format_error_msg(logs, cmd)\n if stop_on_error:\n fabric_api.abort(api.fail(msg))\n else:\n api.fail(msg)\n if get_error_msg:\n # if not msg:\n # debug(\"No message was created for command '%s'\" % cmd)\n r.msgerror = msg\n\n return r\n\n\nclass Yum(object):\n def __init__(self):\n self.path = \"/etc/yum.repos.d/\"\n self.extension = \".repo\"\n self.repodir = \"repofiles\"\n\n def run(self, action, dryrun, pkgs=None):\n opts = ''\n if dryrun:\n if system.distro_version == \"redhat5\":\n runcmd(\"yum -y install yum-downloadonly\")\n elif system.distro_version == \"redhat6\":\n runcmd(\"yum -y install yum-plugin-downloadonly\")\n opts = \"--downloadonly\"\n\n if action == \"refresh\":\n action = \"makecache\"\n\n if pkgs:\n return \"yum -y %s %s %s\" % (opts, action, \" \".join(pkgs))\n else:\n return \"yum -y %s %s\" % (opts, action)\n\n def get_pkglist(self, r):\n \"\"\"Gets the list of packages being installed by parsing yum output.\"\"\"\n d = {}\n for line in filter(None, r.stdout.split('=')):\n if line.startswith(\"\\nInstalling:\\n\"):\n for line2 in line.split('\\n'):\n try:\n name, arch, version, repo, size, unit = line2.split()\n d[name] = '.'.join(['-'.join([name, 
version]), arch])\n except ValueError:\n pass\n\n # YUM: last version installed\n for line in r.stdout.split('\n'):\n m = re.search(\"Package (.+) already installed and latest version\",\n line)\n if m:\n all = ' '.join(m.groups())\n name = re.search(\"([a-zA-Z0-9-_]+)-\\d+.+\", all).groups()[0]\n d[name] = ' '.join([all, \"(already installed)\"])\n\n return d\n\n def get_repos(self):\n l = []\n is_repo = False\n for line in runcmd(\"yum repolist\").split('\n'):\n l_str = filter(None, line.split(' '))\n if \"repo id\" in l_str:\n is_repo = True\n continue\n try:\n if l_str[0].startswith(\"repolist\"):\n is_repo = False\n continue\n except IndexError:\n pass\n if is_repo:\n l.append(l_str[0])\n return l\n\n def remove_repo(self, repolist):\n \"\"\"Remove all occurrences of a list of repositories.\n\n :repolist: list of repository names (ID between brackets)\n \"\"\"\n for repo in repolist:\n r = runcmd(\"grep %s %s/* | cut -d':' -f1|uniq\" % (repo, self.path))\n if r:\n for f in r.split('\n'):\n os.remove(f)\n api.info(\"Existing repository '%s' removed.\" % f)\n\n\nclass Apt(object):\n def __init__(self):\n self.path = \"/etc/apt/sources.list.d/\"\n self.extension = \".list\"\n # FIXME this is not right\n self.repodir = \"repo-files\"\n\n def run(self, action, dryrun, pkgs=None):\n if pkgs:\n if os.path.exists(pkgs[0]):\n return \"dpkg -i %s\" % \" \".join(pkgs)\n\n opts = ''\n if dryrun:\n opts = \"--dry-run\"\n\n if action == \"refresh\":\n action = \"update\"\n\n if pkgs:\n return \"apt-get -y %s %s %s\" % (opts, action, \" \".join(pkgs))\n else:\n return \"apt-get -y %s %s\" % (opts, action)\n\n def get_repos(self):\n \"\"\"Gets the list of enabled repositories.\"\"\"\n return runcmd((\"grep -h ^deb /etc/apt/sources.list \"\n \"/etc/apt/sources.list.d/*\")).split('\n')\n\n def remove_repo(self, repolist):\n \"\"\"Remove all occurrences of a list of repositories.\n\n :repolist: list of repository names.\n \"\"\"\n install(\"software-properties-common\")\n available_repos = self.get_repos()\n\n for repo in repolist:\n for available_repo in available_repos:\n if available_repo.find(repo) != -1:\n runcmd(\"apt-add-repository -r '%s'\" % available_repo)\n api.info(\"Existing repository removed: %s\"\n % available_repo)\n\n def get_pkglist(self, r):\n d = {}\n for line in r.split('\n'):\n m = re.search((\"^Setting up ([a-zA-Z-]+) \"\n \"\\((.+)\\)\"), line)\n if m:\n pkg, version = m.groups()\n d[pkg] = '-'.join([pkg, version])\n return d\n\n\nclass PkgTool(object):\n def __init__(self):\n self.client = {\n \"centos\": Yum,\n \"debian\": Apt,\n \"redhat\": Yum,\n \"ubuntu\": Apt,\n }[system.distname]()\n self.dryrun = False\n\n def get_path(self):\n return self.client.path\n\n def get_extension(self):\n return self.client.extension\n\n def get_repodir(self):\n return self.client.repodir\n\n def get_pkglist(self, r):\n return self.client.get_pkglist(r)\n\n def get_repos(self):\n return self.client.get_repos()\n\n def enable_repo(self, repolist):\n if not os.path.exists(self.client.path):\n os.makedirs(self.client.path)\n l = []\n for repo in to_list(repolist):\n r = runcmd(\"wget %s -O %s\" % (repo,\n os.path.join(\n self.client.path,\n os.path.basename(repo))))\n if r.failed:\n l.append(repo)\n return l\n\n def remove_repo(self, repolist):\n return self.client.remove_repo(to_list(repolist))\n\n def install(self, pkgs, enable_repo=[]):\n if enable_repo:\n self.enable_repo(enable_repo)\n return self._exec(action=\"install\", pkgs=pkgs)\n\n def refresh(self):\n return 
self._exec(action=\"refresh\")\n\n def remove(self, pkgs):\n return self._exec(action=\"remove\", pkgs=pkgs)\n\n def update(self):\n return self._exec(action=\"update\")\n\n def _exec(self, action, pkgs=None):\n try:\n if pkgs:\n pkgs = to_list(pkgs)\n return self.client.run(action, self.dryrun, pkgs=pkgs)\n else:\n return self.client.run(action, self.dryrun)\n except KeyError:\n raise exception.InstallException(\"'%s' OS not supported\"\n % system.distname)\n\n\ndef show_exec_banner():\n \"\"\"Displays execution banner.\"\"\"\n cfg = config.CFG.copy()\n\n print(u'\\n\\u250C %s ' % colors.green(\" UMD verification app\")\n + u'\\u2500' * 49 + u'\\u2510')\n print(u'\\u2502' + u' ' * 72 + u'\\u2502')\n print(u'\\u2502%s %s' % (\"Quality criteria:\".rjust(25),\n colors.blue(\"http://egi-qc.github.io\"))\n + u' ' * 23 + u'\\u2502')\n print(u'\\u2502%s %s' % (\"Codebase:\".rjust(25),\n colors.blue(\"https://github.com/egi-qc/umd-verification\"))\n + u' ' * 4 + u'\\u2502')\n print(u'\\u2502' + u' ' * 72 + u'\\u2502')\n print(u'\\u2502' + u' ' * 7 + u'\\u2500' * 65 + u'\\u2518')\n\n print(u'\\u2502' + u' ' * 72)\n if \"repository_url\" in cfg.keys() and cfg[\"repository_url\"]:\n print(u'\\u2502 Verification repositories used:')\n repos = to_list(cfg.pop(\"repository_url\"))\n for repo in repos:\n print(u'\\u2502\\t%s' % colors.blue(repo))\n\n print(u'\\u2502')\n print(u'\\u2502 Repository basic configuration:')\n basic_repo = [\"umd_release\", \"igtf_repo\"]\n if system.distname in [\"redhat\", \"centos\"]:\n basic_repo.append(\"epel_release\")\n for k in basic_repo:\n v = cfg.pop(k)\n leftjust = len(max(basic_repo, key=len)) + 5\n print(u'\\u2502\\t%s %s' % (k.ljust(leftjust), colors.blue(v)))\n\n print(u'\\u2502')\n print(u'\\u2502 Path locations:')\n for k in [\"log_path\", \"yaim_path\", \"puppet_path\"]:\n v = cfg.pop(k)\n leftjust = len(max(basic_repo, key=len)) + 5\n print(u'\\u2502\\t%s %s' % (k.ljust(leftjust), v))\n\n if cfg[\"qc_envvars\"]:\n print(u'\\u2502')\n print(u'\\u2502 Local environment variables passed:')\n leftjust = len(max(cfg[\"qc_envvars\"], key=len)) + 5\n for k, v in cfg[\"qc_envvars\"].items():\n cfg.pop(\"qcenv_%s\" % k)\n print(u'\\u2502\\t%s %s' % (k.ljust(leftjust), v))\n\n print(u'\\u2502')\n print(u'\\u2514' + u'\\u2500' * 72)\n\n\ndef check_input():\n \"\"\"Performs a list of checks based on input parameters.\"\"\"\n # 1) Type of installation\n if config.CFG[\"installation_type\"]:\n api.info(\"Installation type: %s\" % config.CFG[\"installation_type\"])\n else:\n api.fail((\"Need to provide the type of installation to be performed: \"\n \"(install, upgrade)\"), do_abort=True)\n # 2) Verification repository URL\n if not config.CFG[\"repository_url\"]:\n api.warn(\"No verification repository URL provided.\")\n # 3) Metapackage\n if config.CFG[\"metapkg\"]:\n msg = \"Metapackage/s selected: %s\" % ''.join([\n \"\\n\\t+ %s\" % mpkg for mpkg in config.CFG[\"metapkg\"]])\n api.info(msg)\n print(u'\\u2500' * 73)\n\n\ndef get_class_attrs(obj):\n \"\"\"Retuns a list of the class attributes for a given object.\"\"\"\n return dict([(attr, getattr(obj, attr))\n for attr in dict(inspect.getmembers(\n obj,\n lambda a:not(inspect.isroutine(a)))).keys()\n if not attr.startswith('__')])\n\n\ndef install(pkgs, enable_repo=[]):\n \"\"\"Shortcut for package installations.\"\"\"\n pkgtool = PkgTool()\n return runcmd(pkgtool.install(pkgs, enable_repo))\n\n\ndef get_repos():\n \"\"\"Shortcut for getting enabled repositories in the system.\"\"\"\n pkgtool = PkgTool()\n return 
pkgtool.get_repos()\n\n\ndef remove_repo(repo):\n \"\"\"Shortcut for removing repository files.\"\"\"\n pkgtool = PkgTool()\n return pkgtool.remove_repo(repo)\n\n\ndef is_on_path(prog):\n \"\"\"Checks if a given executable is on the current PATH.\"\"\"\n r = runcmd(\"which %s\" % prog)\n if r.failed:\n return False\n else:\n return r\n\n\ndef clone_repo(repotype, repourl):\n \"\"\"Clone a repository in a temporary directory.\"\"\"\n dirname = tempfile.mkdtemp()\n\n if repotype in [\"git\"]:\n if not is_on_path(\"git\"):\n r = install(\"git\")\n if r.failed:\n api.fail(\"Could not install 'git'.\")\n cmd = \"git clone %s %s\" % (repourl, dirname)\n elif repotype in [\"hg\", \"mercurial\"]:\n if not is_on_path(\"hg\"):\n r = install(\"mercurial\")\n if r.failed:\n api.fail(\"Could not install 'mercurial'.\")\n cmd = \"hg clone %s %s\" % (repourl, dirname)\n else:\n raise NotImplementedError((\"Current implementation does not support \"\n \"repository type '%s'\" % repotype))\n\n r = runcmd(cmd)\n if r.failed:\n api.fail(\"Could not clone repository '%s' (via %s)\"\n % (repourl, repotype))\n os.rmdir(dirname) # clean up the temporary directory before dropping the reference\n dirname = None\n return dirname\n", "sub_path": "umd/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 14313, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "umd.exception.ConfigException", "line_number": 19, "usage_type": "call"}, {"api_name": "umd.exception", "line_number": 19, "usage_type": "name"}, {"api_name": "os.path.dirname", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "umd.api.info", "line_number": 32, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 32, "usage_type": "name"}, {"api_name": "fabric.api.settings", "line_number": 84, "usage_type": "call"}, {"api_name": "fabric.api", "line_number": 84, "usage_type": "name"}, {"api_name": "fabric.api.shell_env", "line_number": 85, "usage_type": "call"}, {"api_name": "fabric.api", "line_number": 85, "usage_type": "name"}, {"api_name": "fabric.api.local", "line_number": 87, "usage_type": "call"}, {"api_name": "fabric.api", "line_number": 87, "usage_type": "name"}, {"api_name": "fabric.context_managers.lcd", "line_number": 94, "usage_type": "call"}, {"api_name": "fabric.context_managers", "line_number": 94, "usage_type": "attribute"}, {"api_name": "fabric.api.abort", "line_number": 108, "usage_type": "call"}, {"api_name": "fabric.api", "line_number": 108, "usage_type": "name"}, {"api_name": "umd.api.fail", "line_number": 108, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 108, "usage_type": "name"}, {"api_name": "umd.api.fail", "line_number": 110, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 110, "usage_type": "name"}, {"api_name": "umd.system.distro_version", "line_number": 128, "usage_type": "attribute"}, {"api_name": "umd.system", "line_number": 128, "usage_type": "name"}, {"api_name": "umd.system.distro_version", "line_number": 130, "usage_type": "attribute"}, {"api_name": "umd.system", "line_number": 130, "usage_type": "name"}, {"api_name": "re.search", "line_number": 156, "usage_type": "call"}, {"api_name": "re.search", "line_number": 160, "usage_type": "call"}, {"api_name": "os.remove", 
"line_number": 192, "usage_type": "call"}, {"api_name": "umd.api.info", "line_number": 193, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 193, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 205, "usage_type": "call"}, {"api_name": "os.path", "line_number": 205, "usage_type": "attribute"}, {"api_name": "umd.api.info", "line_number": 237, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 237, "usage_type": "name"}, {"api_name": "re.search", "line_number": 243, "usage_type": "call"}, {"api_name": "umd.system.distname", "line_number": 258, "usage_type": "attribute"}, {"api_name": "umd.system", "line_number": 258, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 277, "usage_type": "call"}, {"api_name": "os.path", "line_number": 277, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 282, "usage_type": "call"}, {"api_name": "os.path", "line_number": 282, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "umd.exception.InstallException", "line_number": 314, "usage_type": "call"}, {"api_name": "umd.exception", "line_number": 314, "usage_type": "name"}, {"api_name": "umd.system.distname", "line_number": 315, "usage_type": "attribute"}, {"api_name": "umd.system", "line_number": 315, "usage_type": "name"}, {"api_name": "umd.config.CFG.copy", "line_number": 320, "usage_type": "call"}, {"api_name": "umd.config.CFG", "line_number": 320, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 320, "usage_type": "name"}, {"api_name": "fabric.colors.green", "line_number": 322, "usage_type": "call"}, {"api_name": "fabric.colors", "line_number": 322, "usage_type": "name"}, {"api_name": "fabric.colors.blue", "line_number": 326, "usage_type": "call"}, {"api_name": "fabric.colors", "line_number": 326, "usage_type": "name"}, {"api_name": "fabric.colors.blue", "line_number": 329, "usage_type": "call"}, {"api_name": "fabric.colors", "line_number": 329, "usage_type": "name"}, {"api_name": "fabric.colors.blue", "line_number": 339, "usage_type": "call"}, {"api_name": "fabric.colors", "line_number": 339, "usage_type": "name"}, {"api_name": "umd.system.distname", "line_number": 344, "usage_type": "attribute"}, {"api_name": "umd.system", "line_number": 344, "usage_type": "name"}, {"api_name": "fabric.colors.blue", "line_number": 349, "usage_type": "call"}, {"api_name": "fabric.colors", "line_number": 349, "usage_type": "name"}, {"api_name": "umd.config.CFG", "line_number": 373, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 373, "usage_type": "name"}, {"api_name": "umd.api.info", "line_number": 374, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 374, "usage_type": "name"}, {"api_name": "umd.config.CFG", "line_number": 374, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 374, "usage_type": "name"}, {"api_name": "umd.api.fail", "line_number": 376, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 376, "usage_type": "name"}, {"api_name": "umd.config.CFG", "line_number": 379, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 379, "usage_type": "name"}, {"api_name": "umd.api.warn", "line_number": 380, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 380, "usage_type": "name"}, {"api_name": "umd.config.CFG", 
"line_number": 382, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 382, "usage_type": "name"}, {"api_name": "umd.config.CFG", "line_number": 384, "usage_type": "attribute"}, {"api_name": "umd.config", "line_number": 384, "usage_type": "name"}, {"api_name": "umd.api.info", "line_number": 385, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 385, "usage_type": "name"}, {"api_name": "inspect.getmembers", "line_number": 392, "usage_type": "call"}, {"api_name": "inspect.isroutine", "line_number": 394, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 427, "usage_type": "call"}, {"api_name": "umd.api.fail", "line_number": 433, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 433, "usage_type": "name"}, {"api_name": "umd.api.fail", "line_number": 439, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 439, "usage_type": "name"}, {"api_name": "umd.api.fail", "line_number": 447, "usage_type": "call"}, {"api_name": "umd.api", "line_number": 447, "usage_type": "name"}, {"api_name": "os.rmdir", "line_number": 450, "usage_type": "call"}]} +{"seq_id": "345723914", "text": "import pandas as pd\nimport numpy as np\nfrom datetime import datetime, timezone, timedelta\nimport random\n\nPATH = 'data/cosmetics/'\nFILE = 'interactions' # all\n# FILE = 'interactions_last_2_months' # last 3 months\n# FILE = 'interactions_last_3_months' # last 3 months\n# FILE = 'interactions_feb' # last 1 month\n\n# keys\n\nUSER_KEY='user_id'\nITEM_KEY='product_id'\nTIME_KEY='event_time'\nSESSION_KEY='user_session'\nTYPE_KEY='event_type'\nACTION_TYPE=\"view\"\n# filters\nMIN_ITEM_SUPPORT = 5 # 20\nMIN_SESSION_LENGTH = 2 # 3\nMIN_USER_SESSIONS = 3 # 5\nMAX_USER_SESSIONS = None # 200\nREPEAT = False # apply filters several times\nCLEAN_TEST = True\nSLICES_NUM = 5\nSESSION_THRESHOLD = 30 * 60\nSLICE_INTERVAL = 31 # total_interval = 152 (all)\n# SLICE_INTERVAL = 18 #6 #12 # total_interval = last 3 month\nDAYS_OFFSET = 0\nSAMPLE = True # False\nSAMPLE_PERCENTAGE = 10\n\ndef make_sessions(data, session_th=SESSION_THRESHOLD, is_ordered=False):\n del data[SESSION_KEY]\n \"\"\"Assigns session ids to the events in data without grouping keys\"\"\"\n if not is_ordered:\n # sort data by user and time\n data.sort_values(by=[USER_KEY, TIME_KEY], ascending=True, inplace=True)\n # compute the time difference between queries\n tdiff = np.diff(data[TIME_KEY].values)\n # check which of them are bigger then session_th\n split_session = tdiff > session_th\n split_session = np.r_[True, split_session]\n # check when the user chenges is data\n # new_user = data['user_id'].values[1:] != data['user_id'].values[:-1]\n new_user = data[USER_KEY].values[1:] != data[USER_KEY].values[:-1]\n new_user = np.r_[True, new_user]\n # a new sessions stars when at least one of the two conditions is verified\n new_session = np.logical_or(new_user, split_session)\n # compute the session ids\n session_ids = np.cumsum(new_session)\n data[SESSION_KEY] = session_ids\n return data\n\ndef slice_data(data, num_slices=SLICES_NUM, days_offset=DAYS_OFFSET, days_shift=SLICE_INTERVAL):\n for slice_id in range(0, num_slices):\n split_data_slice(data, slice_id, days_offset, days_shift)\n\n\ndef split_data_slice(data, slice_id, days_offset, days_shift):\n start_day = days_offset + (slice_id * days_shift)\n start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc) + timedelta(start_day)\n end_day = days_offset + ((slice_id+1) * days_shift)\n end = datetime.fromtimestamp(data[TIME_KEY].min(), 
timezone.utc) + timedelta(end_day)\n\n # prefilter the timespan\n session_max_times = (data.groupby([SESSION_KEY])[TIME_KEY]).max()\n greater_start = session_max_times[session_max_times >= start.timestamp()].index\n lower_end = session_max_times[session_max_times <= end.timestamp()].index\n data = data[np.in1d(data[SESSION_KEY], greater_start.intersection(lower_end))]\n\n\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n\n print('Slice data set {}\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n format(slice_id, len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n data_start.date().isoformat(), data_end.date().isoformat()))\n\n print('--------------------- Slice-Original---')\n\n\n report_statistics(data)\n\n data = filter_data(data)\n\n if SAMPLE:\n data = sample(data)\n\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n print('--------------------- Sampled---')\n print('Sampled data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n data_start.date().isoformat(),\n data_end.date().isoformat()))\n report_statistics(data)\n\n # training-test split\n split_data(data, MIN_SESSION_LENGTH)\n\n\ndef last_session_out_split(data, min_session_length):\n \"\"\"\n last-session-out split\n assign the last session of every user to the test set and the remaining ones to the training set\n \"\"\"\n sessions = data.sort_values(by=[USER_KEY, TIME_KEY]).groupby(USER_KEY)[SESSION_KEY]\n last_session = sessions.last()\n train = data[~data[SESSION_KEY].isin(last_session.values)].copy()\n test = data[data[SESSION_KEY].isin(last_session.values)].copy()\n if CLEAN_TEST:\n train_items = train[ITEM_KEY].unique()\n test = test[test[ITEM_KEY].isin(train_items)]\n #  remove sessions in test shorter than min_session_length\n slen = test[SESSION_KEY].value_counts()\n good_sessions = slen[slen >= min_session_length].index\n test = test[test[SESSION_KEY].isin(good_sessions)].copy()\n train = train.reset_index(drop=True)\n test = test.reset_index(drop=True)\n return train, test\n\n\ndef split_data(data, min_session_length): #TODO: extend for supporting more than one session per user for test\n \"\"\"\n assign the last session of every user to the test set and the remaining ones to the training set\n \"\"\"\n train_full_sessions, test_sessions = last_session_out_split(data, min_session_length)\n train_valid_sessions, valid_sessions = last_session_out_split(train_full_sessions, min_session_length)\n\n print('--------------------- Training---')\n data = train_full_sessions\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n # print('Training data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n # format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n # data_start.date().isoformat(), data_end.date().isoformat()))\n # report_statistics(data)\n\n print('--------------------- Test---')\n data = test_sessions\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = 
datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n # print('Test data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n # format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n # data_start.date().isoformat(), data_end.date().isoformat()))\n # report_statistics(data)\n\n print('--------------------- Validation_training---:')\n data = train_valid_sessions\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n # print('Validation_training data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n # format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n # data_start.date().isoformat(), data_end.date().isoformat()))\n # report_statistics(data)\n\n print('--------------------- Validation_test---')\n data = valid_sessions\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n # print('Validation_test data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n # format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n # data_start.date().isoformat(), data_end.date().isoformat()))\n # report_statistics(data)\n\n\ndef filter_data(data):\n condition = data.groupby(USER_KEY)[SESSION_KEY].nunique().min() >= MIN_USER_SESSIONS and data.groupby(\n [USER_KEY, SESSION_KEY]).size().min() >= MIN_SESSION_LENGTH and data.groupby(\n [ITEM_KEY]).size().min() >= MIN_ITEM_SUPPORT\n counter = 1\n while not condition:\n print(counter)\n # keep items with >=5 interactions\n item_pop = data[ITEM_KEY].value_counts()\n good_items = item_pop[item_pop >= MIN_ITEM_SUPPORT].index\n data = data[data[ITEM_KEY].isin(good_items)]\n # remove sessions with length < 2\n session_length = data[SESSION_KEY].value_counts()\n good_sessions = session_length[session_length >= MIN_SESSION_LENGTH].index\n data = data[data[SESSION_KEY].isin(good_sessions)]\n # let's keep only returning users (with >= MIN_USER_SESSIONS sessions)\n sess_per_user = data.groupby(USER_KEY)[SESSION_KEY].nunique()\n good_users = sess_per_user[sess_per_user >= MIN_USER_SESSIONS].index\n data = data[data[USER_KEY].isin(good_users)]\n condition = data.groupby(USER_KEY)[SESSION_KEY].nunique().min() >= MIN_USER_SESSIONS and data.groupby(\n [USER_KEY, SESSION_KEY]).size().min() >= MIN_SESSION_LENGTH and data.groupby(\n [ITEM_KEY]).size().min() >= MIN_ITEM_SUPPORT\n counter += 1\n if not REPEAT:\n break\n\n # output\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n\n print('Filtered data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(),\n data_start.date().isoformat(),\n data_end.date().isoformat()))\n\n print('--------------------- Slice-Filtered---')\n report_statistics(data)\n return data\n\n\ndef report_statistics(data):\n print('--------------------- Statistics---')\n sess_per_user = data.groupby(USER_KEY)[SESSION_KEY].nunique()\n print('Min num of users\' sessions: {}'.format(sess_per_user.min()))\n print('Min sessions\' length: {}'.format(data.groupby([USER_KEY, 
SESSION_KEY]).size().min()))\n print('Min num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().min()))\n print('---------------------')\n # print('Num of users: {}'.format(data[USER_KEY].nunique()))\n # print('Max num of users\' interactions: {}'.format(data.groupby([USER_KEY]).size().max()))\n # print('Min num of users\' interactions: {}'.format(data.groupby([USER_KEY]).size().min()))\n # print('Median num of users\' interactions: {}'.format(data.groupby([USER_KEY]).size().median()))\n # print('Mean num of users\' interactions: {}'.format(data.groupby([USER_KEY]).size().mean()))\n # print('Std num of users\' interactions: {}'.format(data.groupby([USER_KEY]).size().std()))\n sess_per_user = data.groupby(USER_KEY)[SESSION_KEY].nunique()\n print('Max num of users\' sessions: {}'.format(sess_per_user.max()))\n print('Min num of users\' sessions: {}'.format(sess_per_user.min()))\n print('Median num of users\' sessions: {}'.format(sess_per_user.median()))\n print('Mean num of users\' sessions: {}'.format(sess_per_user.mean()))\n print('Std num of users\' sessions: {}'.format(sess_per_user.std()))\n print('---------------------')\n # print('Num of sessions per user: {}'.format(np.count_nonzero(data.groupby(USER_KEY)[SESSION_KEY].nunique())))\n print('Max sessions\' length: {}'.format(data.groupby([USER_KEY, SESSION_KEY]).size().max()))\n print('Min sessions\' length: {}'.format(data.groupby([USER_KEY, SESSION_KEY]).size().min()))\n print('Median sessions\' length: {}'.format(data.groupby([USER_KEY, SESSION_KEY]).size().median()))\n print('Mean sessions\' length: {}'.format(data.groupby([USER_KEY, SESSION_KEY]).size().mean()))\n print('Std sessions\' length: {}'.format(data.groupby([USER_KEY, SESSION_KEY]).size().std()))\n print('---------------------')\n # print('Num of items: {}'.format(data[ITEM_KEY].nunique()))\n # print('Max num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().max()))\n # print('Min num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().min()))\n # print('Median num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().median()))\n # print('Mean num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().mean()))\n # print('Std num of interactions done with an item: {}'.format(data.groupby([ITEM_KEY]).size().std()))\n\n print('Max num of interactions done with an item: {}'.format(data[ITEM_KEY].value_counts().max()))\n print('Min num of interactions done with an item: {}'.format(data[ITEM_KEY].value_counts().min()))\n print('Median num of interactions done with an item: {}'.format(data[ITEM_KEY].value_counts().median()))\n print('Mean num of interactions done with an item: {}'.format(data[ITEM_KEY].value_counts().mean()))\n print('Std num of interactions done with an item: {}'.format(data[ITEM_KEY].value_counts().std()))\n print('---------------------')\n\n\ndef clear_sessions(data):\n \"\"\"Delete sessions whose session_id is the same for different users!\"\"\"\n data = data[data[SESSION_KEY].isin(data.groupby(SESSION_KEY)[USER_KEY].nunique()[\n (data.groupby(SESSION_KEY)[USER_KEY].nunique() > 1) == False].index)]\n return data\n\ndef prepare_time(data, time_key=TIME_KEY):\n \"\"\"Converts the event times to integer Unix timestamps\"\"\"\n data[time_key] = data[time_key].apply(lambda x: datetime.strptime(x, \"%Y-%m-%d %H:%M:%S %Z\").timestamp())\n data[time_key] = data[time_key].astype('int64')\n return data\n\ndef 
sample(data):\n users = list(set(data[USER_KEY]))\n random.seed(10)\n users=random.sample(users, int(len(users) * SAMPLE_PERCENTAGE / 100))\n data = data[data[USER_KEY].isin(users)]\n return data\n\nif __name__ == '__main__':\n # updater.dispatcher.add_handler( CommandHandler('status', status) )\n data = pd.read_csv(PATH + FILE + '.csv', sep=',')\n\n # only keep interactions of type 'view'\n data = data[data[TYPE_KEY] == ACTION_TYPE].copy()\n # data = data[data[TYPE_KEY] != ACTION_TYPE].copy()\n # prepare time format\n data = prepare_time(data, time_key=TIME_KEY)\n\n # mapping = pd.Series(index=data[SESSION_KEY].unique(), data=range(1, len(data[SESSION_KEY].unique()) + 1))\n # data[SESSION_KEY] = data[SESSION_KEY].map(mapping)\n print('Building sessions')\n # partition interactions into sessions with a 30-minute idle time\n data = make_sessions(data, session_th=SESSION_THRESHOLD, is_ordered=False)\n\n data_start = datetime.fromtimestamp(data[TIME_KEY].min(), timezone.utc)\n data_end = datetime.fromtimestamp(data[TIME_KEY].max(), timezone.utc)\n\n print('Original data set\n\tEvents: {}\n\tUsers: {}\n\tSessions: {}\n\tItems: {}\n\tSpan: {} / {}\n\n'.\n format(len(data), data[USER_KEY].nunique(), data[SESSION_KEY].nunique(), data[ITEM_KEY].nunique(), data_start.date().isoformat(),\n data_end.date().isoformat()))\n\n print('--------------------- Original---')\n report_statistics(data)\n\n slice_data(data)\n\n", "sub_path": "backup/preprocessing/check_statistics/cosmetics/cosmetics_statistics_sliding_sampling_splitting.py", "file_name": "cosmetics_statistics_sliding_sampling_splitting.py", "file_ext": "py", "file_size_in_byte": 15039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.diff", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.r_", "line_number": 45, "usage_type": "attribute"}, {"api_name": "numpy.r_", "line_number": 49, "usage_type": "attribute"}, {"api_name": "numpy.logical_or", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 64, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 64, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 64, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 66, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 66, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 66, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.in1d", "line_number": 72, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 75, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 75, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 75, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", 
"line_number": 76, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 76, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 92, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 92, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 92, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 93, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 93, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 135, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 135, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 135, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 136, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 136, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 136, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 136, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 144, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 144, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 144, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 145, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 145, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 153, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 153, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 153, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 153, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 154, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 154, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 154, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 154, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 162, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 162, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 162, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 162, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 163, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 163, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 163, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 163, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 197, "usage_type": "call"}, {"api_name": "datetime.datetime", 
"line_number": 197, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 197, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 197, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 198, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 198, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 198, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 198, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 260, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 260, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 266, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 267, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 273, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 287, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 287, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 287, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 287, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 288, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 288, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 288, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 288, "usage_type": "name"}]} +{"seq_id": "20740267", "text": "from django.db import models\nfrom django.db.models import Q, Sum, F, FloatField, ExpressionWrapper\n\nclass CarroComprasManager(models.Manager):\n\n def total_cobrar(self,pk):\n\n consulta = self.filter(user=pk).aggregate(\n total = Sum (\n F('cantidad')*F('productos__precio'),\n output_field= FloatField()\n ),\n )\n\n if consulta['total']:\n return consulta['total']\n else:\n return 0\n\n\nclass ReservasManager(models.Manager):\n\n def total_cobrar_reservas(self,pk):\n\n consulta = self.filter(user=pk).aggregate(\n total = Sum (\n F('cantidad')*F('productos__precio'),\n output_field= FloatField()\n ),\n )\n\n if consulta['total']:\n return consulta['total']\n else:\n return 0", "sub_path": "applications/compras/managers.py", "file_name": "managers.py", "file_ext": "py", "file_size_in_byte": 862, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.models.Manager", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 9, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 10, "usage_type": "call"}, {"api_name": "django.db.models.FloatField", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.Manager", "line_number": 21, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Sum", "line_number": 26, "usage_type": "call"}, {"api_name": "django.db.models.F", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.models.FloatField", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "449006061", "text": "from django.contrib import admin\n\nfrom training_server.models import AutoSklearnConfig, TpotConfig\n\n#from automl_server.automl_systems.tpot.run import train as 
train_tpot\n\n\nclass TpotConfigAdmin(admin.ModelAdmin):\n\n fieldsets = (\n ('General Info:', {'fields': ('framework', 'status', 'date_trained', 'model_path')}),\n ('Resource Options:', {'fields': ('n_jobs', 'max_time_mins', 'max_eval_time_mins',)}),\n ('Model Training Options:', {'fields': (\n 'generations', 'population_size', 'offspring_size', 'mutation_rate', 'crossover_rate', 'subsample', 'random_state', 'config_dict', 'warm_start', 'use_dask', 'early_stop', 'verbosity')}),\n ('Evaluation', {'fields': ('scoring', 'cv', )}),\n ('Caching and storage:', {'fields': (\n 'memory',)})\n )\n\n def get_readonly_fields(self, request, obj=None):\n readonly_fields = ['status', 'model_path', 'date_trained', 'additional_remarks']\n if obj:\n if not 'framework' in readonly_fields:\n readonly_fields.append('framework')\n if obj.training_triggered:\n return [f.name for f in self.model._meta.fields]\n return readonly_fields\n\n\n def save_model(self, request, obj, form, change):\n obj.training_triggered = True\n #train_tpot(obj)\n obj.status = ('in_progress')\n super(TpotConfigAdmin, self).save_model(request, obj, form, change)\n\n def has_add_permission(self, request, obj=None):\n return False\n\n\n\nadmin.site.register(TpotConfig, TpotConfigAdmin)\n", "sub_path": "training_server/admin/tpot_config.py", "file_name": "tpot_config.py", "file_ext": "py", "file_size_in_byte": 1553, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.contrib.admin.ModelAdmin", "line_number": 8, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 8, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 41, "usage_type": "call"}, {"api_name": "training_server.models.TpotConfig", "line_number": 41, "usage_type": "argument"}, {"api_name": "django.contrib.admin.site", "line_number": 41, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "441954176", "text": "from datetime import datetime\nfrom multiprocessing import Pool, cpu_count\nimport time\n\nfrom baike import url_manager, html_parser, html_downloader, html_outputer\n\n\nclass SpiderMain(object):\n def __init__(self):\n self.urls = url_manager.UrlManager()\n self.downloader = html_downloader.HtmlDownLoader()\n self.praser = html_parser.HtmlPraser()\n self.outputer = html_outputer.HtmlOutputer()\n\n def craw(self, root_url):\n\n # add the new url\n self.urls.add_new_url(root_url)\n # current item count\n count = 1\n self.task(count)\n count = count + 1\n time.sleep(3)\n time1 = datetime.now()\n # iterate over all the urls\n # pool = Pool(cpu_count())\n #\n # while self.urls.has_new_url():\n # pool.apply(func=self.task,args=(count,))\n # if count == 99:\n # break\n # count = count + 1\n # pool.close()\n # pool.join()\n while self.urls.has_new_url():\n self.task(count)\n if count == 999:\n break\n count = count + 1\n\n # output the collected data\n self.outputer.output_html()\n time2 = datetime.now()\n print(\"Elapsed %s seconds\" % (time2 - time1).seconds) # 23-21\n\n def task(self, count):\n try:\n # get one url\n new_url = self.urls.get_new_url()\n print(\"%d : %s\" % (count, new_url))\n # download the page\n html_cont = self.downloader.download(new_url)\n # parse the page to get a new url list and the data\n new_urls, new_data = self.praser.prase(new_url, html_cont)\n # add the url list to the url manager\n self.urls.add_new_urls(new_urls)\n # collect the data\n self.outputer.collect_data(new_data)\n except(Exception) as e:\n print(\"craw fail:%s\" % (e))\n\n\nif __name__ == \"__main__\":\n root_url = 
\"https://baike.baidu.com/item/Python/407313\"\n obj_spider = SpiderMain()\n obj_spider.craw(root_url)\n", "sub_path": "baike/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2058, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "baike.url_manager.UrlManager", "line_number": 10, "usage_type": "call"}, {"api_name": "baike.url_manager", "line_number": 10, "usage_type": "name"}, {"api_name": "baike.html_downloader.HtmlDownLoader", "line_number": 11, "usage_type": "call"}, {"api_name": "baike.html_downloader", "line_number": 11, "usage_type": "name"}, {"api_name": "baike.html_parser.HtmlPraser", "line_number": 12, "usage_type": "call"}, {"api_name": "baike.html_parser", "line_number": 12, "usage_type": "name"}, {"api_name": "baike.html_outputer.HtmlOutputer", "line_number": 13, "usage_type": "call"}, {"api_name": "baike.html_outputer", "line_number": 13, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 43, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 43, "usage_type": "name"}]} +{"seq_id": "200794245", "text": "\"\"\"\nXmlListConfig and XmlDictConfig source: http://code.activestate.com/recipes/410469-xml-as-dictionary/\n\"\"\"\nfrom xml.etree import cElementTree\n\nfrom .base import BaseLoader\n\n\nclass XmlLoader(BaseLoader):\n def parse(self, file: open) -> dict:\n root = cElementTree.XML(file.read())\n data = XmlDictConfig(root)\n return data\n\n\nclass XmlListConfig(list):\n def __init__(self, a_list):\n for element in a_list:\n if element:\n # treat like dict\n if len(element) == 1 or element[0].tag != element[1].tag:\n self.append(XmlDictConfig(element))\n # treat like list\n elif element[0].tag == element[1].tag:\n self.append(XmlListConfig(element))\n elif element.text:\n text = element.text.strip()\n if text:\n self.append(text)\n\n\nclass XmlDictConfig(dict):\n \"\"\"\n Example usage:\n\n >>> tree = ElementTree.parse('your_file.xml')\n >>> root = tree.getroot()\n >>> xmldict = XmlDictConfig(root)\n\n Or, if you want to use an XML string:\n\n >>> root = ElementTree.XML(xml_string)\n >>> xmldict = XmlDictConfig(root)\n\n And then use xmldict for what it is... a dict.\n \"\"\"\n\n def __init__(self, parent_element):\n if parent_element.items():\n self.update(dict(parent_element.items()))\n for element in parent_element:\n if element:\n # treat like dict - we assume that if the first two tags\n # in a series are different, then they are all different.\n if len(element) == 1 or element[0].tag != element[1].tag:\n a_dict = XmlDictConfig(element)\n # treat like list - we assume that if the first two tags\n # in a series are the same, then the rest are the same.\n else:\n # here, we put the list in dictionary; the key is the\n # tag name the list elements all share in common, and\n # the value is the list itself\n a_dict = {element[0].tag: XmlListConfig(element)}\n # if the tag has attributes, add those to the dict\n if element.items():\n a_dict.update(dict(element.items()))\n self.update({element.tag: a_dict})\n # this assumes that if you've got an attribute in a tag,\n # you won't be having any text. This may or may not be a\n # good idea -- time will tell. 
It works for the way we are\n # currently doing XML configuration files...\n # elif element.items():\n # self.update({element.tag: dict(element.items())})\n # finally, if there are no child tags and no attributes, extract\n # the text\n else:\n element_type = element.attrib.get('type') or 'str'\n if element_type == 'int':\n value = try_type(element.text, int)\n elif element_type == 'float':\n value = try_type(element.text, float)\n elif element_type in ['bool', 'boolean']:\n value = True if element.text.lower() in ['true', '1', '+', 'allow'] else False\n else:\n value = element.text\n self.update({element.tag: value})\n\n\ndef try_type(text, result_type):\n try:\n return result_type(text)\n except:\n return text\n", "sub_path": "NucleusUtils/config/engines/xml.py", "file_name": "xml.py", "file_ext": "py", "file_size_in_byte": 3519, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "base.BaseLoader", "line_number": 9, "usage_type": "name"}, {"api_name": "xml.etree.cElementTree.XML", "line_number": 11, "usage_type": "call"}, {"api_name": "xml.etree.cElementTree", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "426938676", "text": "import nbformat\nfrom flask import Flask, jsonify, send_from_directory\nfrom nbconvert import HTMLExporter\nfrom .config import JWT_SECRET_KEY, ALLOW_CORS\nfrom datetime import datetime\n\nfrom src.backend.domain.code_comment import CodeComment\nfrom pygments import highlight\nimport base64\nfrom pygments.formatters.html import HtmlFormatter\nfrom typing import Iterable\nfrom pygments.lexers import guess_lexer_for_filename\nfrom flask import request\nfrom github import Github, Repository, NamedUser\nfrom flask_jwt_extended import (\n JWTManager, jwt_required,\n create_access_token,\n get_jwt_identity,\n jwt_optional\n)\n\nfrom src.backend.api.code_repositories import code_repositories_blueprint\nfrom src.backend.api.notebooks import notebooks_blueprint\n\nclass APIService:\n\n @staticmethod\n def get() -> Flask:\n app = Flask(__name__)\n\n if ALLOW_CORS:\n from flask_cors import CORS\n\n CORS(app, resources={r'/*': {'origins': '*'}})\n\n app.config['JWT_SECRET_KEY'] = JWT_SECRET_KEY\n JWTManager(app)\n\n app.register_blueprint(code_repositories_blueprint, url_prefix='/api')\n app.register_blueprint(notebooks_blueprint, url_prefix='/api')\n\n @app.route('/img/')\n def get_img_file(path):\n return send_from_directory('static/img', path)\n\n @app.route('/js/')\n def get_js_file(path):\n return send_from_directory('static/js', path)\n\n @app.route('/css/')\n def get_css_file(path):\n return send_from_directory('static/css', path)\n\n @app.route('/api/login', methods=['POST'])\n @jwt_optional\n def login():\n personal_access_token = request.json['personalAccessToken']\n github = Github(personal_access_token)\n\n try:\n github.get_user().login\n except:\n return '', 401\n\n access_token = create_access_token(identity=personal_access_token)\n return jsonify(accessToken=access_token), 200\n\n @app.route('/api/me')\n @jwt_required\n def user_info():\n github = Github(get_jwt_identity())\n user: NamedUser = github.get_user()\n\n return jsonify({\n \"avatarUrl\": user.avatar_url,\n \"name\": user.name,\n })\n\n @app.route('/')\n @app.route('/')\n def index(path=None):\n return app.send_static_file('index.html')\n\n return app\n", "sub_path": "src/backend/api_service.py", "file_name": "api_service.py", "file_ext": "py", "file_size_in_byte": 2503, "program_lang": "python", "lang": "en", "doc_type": 
"code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 29, "usage_type": "call"}, {"api_name": "config.ALLOW_CORS", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_cors.CORS", "line_number": 34, "usage_type": "call"}, {"api_name": "config.JWT_SECRET_KEY", "line_number": 36, "usage_type": "name"}, {"api_name": "flask_jwt_extended.JWTManager", "line_number": 37, "usage_type": "call"}, {"api_name": "src.backend.api.code_repositories.code_repositories_blueprint", "line_number": 39, "usage_type": "argument"}, {"api_name": "src.backend.api.notebooks.notebooks_blueprint", "line_number": 40, "usage_type": "argument"}, {"api_name": "flask.send_from_directory", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.send_from_directory", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 57, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 57, "usage_type": "name"}, {"api_name": "github.Github", "line_number": 58, "usage_type": "call"}, {"api_name": "github.get_user", "line_number": 61, "usage_type": "call"}, {"api_name": "flask_jwt_extended.create_access_token", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 66, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_optional", "line_number": 55, "usage_type": "name"}, {"api_name": "github.Github", "line_number": 71, "usage_type": "call"}, {"api_name": "flask_jwt_extended.get_jwt_identity", "line_number": 71, "usage_type": "call"}, {"api_name": "github.NamedUser", "line_number": 72, "usage_type": "name"}, {"api_name": "github.get_user", "line_number": 72, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 74, "usage_type": "call"}, {"api_name": "flask_jwt_extended.jwt_required", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "257301667", "text": "import json\nimport pandas as pd\nimport sys\nfrom datetime import datetime as dt\nimport os\n\n\n'''\n Doccano only returns json format, but we want to work with conll format. 
That's where this script comes in.\n'''\n\n\ndef load(filename):\n ''' load json returned from doccano, one row per entry\n Parameters\n ----------\n filename: str\n \n Return\n ------\n tweets: dict\n Dict of dicts keyed by index\n \n '''\n\n with open(filename, 'r', encoding='utf-8') as f:\n tweets = {i: json.loads(line) for i, line in enumerate(f)}\n return tweets\n\n\ndef label(items):\n '''Takes dict of dicts and returns series of strings\n in conll format, ready to be written\n Note: \\# is changed back to # (so if you load it into doccano again it will disappear)\n \n Parameters\n ----------\n items: dict\n \n Returns\n -------\n Series\n of Tweet strings in conll format\n \n '''\n\n tweets = []\n for item in items.values():\n text = item['text']\n labels = sorted(item['labels'], key=lambda x: x[0])\n x = [text[a:b] for a, b, _ in labels]\n y = [c for _, _, c in labels]\n tweet_string = '\\n'.join(['{}\\t{}'.format(\n token if token != '\\#' else '#', tag) for token, tag in zip(x, y)])\n tweets.append(tweet_string + '\\n')\n return pd.Series(tweets)\n\n\ndef write(tweets, filename='../data/' + dt.now().strftime(\"%d_%m_%y\") + '_doccano_labelled.txt'):\n ''' take strings in conll format and save them to file\n \n Parameters\n ----------\n tweets: Series\n Series of tweet strings in conll format\n filename: str\n Where to save the conll formatted text file\n \n Returns\n -------\n bool\n True if success, else false\n \n '''\n\n mode = 'ab' if os.path.exists(filename) else 'wb'\n try:\n with open(filename, mode) as f:\n for tweet in tweets:\n f.write((tweet + '\\n').encode('utf-8'))\n print(f'Saved to {filename}')\n return True\n except Exception as e:\n print(e)\n return False\n\n\nif __name__ == '__main__':\n assert len(sys.argv) == 2, '1 argument accepted.'\n\n tweets = load(sys.argv[1])\n labelled_tweets = label(tweets)\n write(labelled_tweets)\n", "sub_path": "src/json_to_conll.py", "file_name": "json_to_conll.py", "file_ext": "py", "file_size_in_byte": 2267, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.loads", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 59, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 59, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 89, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 91, "usage_type": "attribute"}]} +{"seq_id": "330219210", "text": "# coding: utf-8\n\n\"\"\"\n SendinBlue API\n\n SendinBlue provide a RESTFul API that can be used with any languages. With this API, you will be able to : - Manage your campaigns and get the statistics - Manage your contacts - Send transactional Emails and SMS - and much more... You can download our wrappers at https://github.com/orgs/sendinblue **Possible responses** | Code | Message | | :-------------: | ------------- | | 200 | OK. Successful Request | | 201 | OK. Successful Creation | | 202 | OK. Request accepted | | 204 | OK. Successful Update/Deletion | | 400 | Error. Bad Request | | 401 | Error. Authentication Needed | | 402 | Error. Not enough credit, plan upgrade needed | | 403 | Error. Permission denied | | 404 | Error. Object does not exist | | 405 | Error. Method not allowed | | 406 | Error. 
Not Acceptable | # noqa: E501\n\n OpenAPI spec version: 3.0.0\n Contact: contact@sendinblue.com\n Generated by: https://github.com/swagger-api/swagger-codegen.git\n\"\"\"\n\n\nimport pprint\nimport re # noqa: F401\n\nimport six\n\n\nclass GetExtendedList(object):\n \"\"\"NOTE: This class is auto generated by the swagger code generator program.\n\n Do not edit the class manually.\n \"\"\"\n\n \"\"\"\n Attributes:\n swagger_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n swagger_types = {\n 'id': 'int',\n 'name': 'str',\n 'total_blacklisted': 'int',\n 'total_subscribers': 'int',\n 'folder_id': 'int',\n 'created_at': 'datetime',\n 'campaign_stats': 'list[GetExtendedListCampaignStats]',\n 'dynamic_list': 'bool'\n }\n\n attribute_map = {\n 'id': 'id',\n 'name': 'name',\n 'total_blacklisted': 'totalBlacklisted',\n 'total_subscribers': 'totalSubscribers',\n 'folder_id': 'folderId',\n 'created_at': 'createdAt',\n 'campaign_stats': 'campaignStats',\n 'dynamic_list': 'dynamicList'\n }\n\n def __init__(self, id=None, name=None, total_blacklisted=None, total_subscribers=None, folder_id=None, created_at=None, campaign_stats=None, dynamic_list=None): # noqa: E501\n \"\"\"GetExtendedList - a model defined in Swagger\"\"\" # noqa: E501\n\n self._id = None\n self._name = None\n self._total_blacklisted = None\n self._total_subscribers = None\n self._folder_id = None\n self._created_at = None\n self._campaign_stats = None\n self._dynamic_list = None\n self.discriminator = None\n\n self.id = id\n self.name = name\n self.total_blacklisted = total_blacklisted\n self.total_subscribers = total_subscribers\n self.folder_id = folder_id\n self.created_at = created_at\n if campaign_stats is not None:\n self.campaign_stats = campaign_stats\n if dynamic_list is not None:\n self.dynamic_list = dynamic_list\n\n @property\n def id(self):\n \"\"\"Gets the id of this GetExtendedList. # noqa: E501\n\n ID of the list # noqa: E501\n\n :return: The id of this GetExtendedList. # noqa: E501\n :rtype: int\n \"\"\"\n return self._id\n\n @id.setter\n def id(self, id):\n \"\"\"Sets the id of this GetExtendedList.\n\n ID of the list # noqa: E501\n\n :param id: The id of this GetExtendedList. # noqa: E501\n :type: int\n \"\"\"\n if id is None:\n raise ValueError(\"Invalid value for `id`, must not be `None`\") # noqa: E501\n\n self._id = id\n\n @property\n def name(self):\n \"\"\"Gets the name of this GetExtendedList. # noqa: E501\n\n Name of the list # noqa: E501\n\n :return: The name of this GetExtendedList. # noqa: E501\n :rtype: str\n \"\"\"\n return self._name\n\n @name.setter\n def name(self, name):\n \"\"\"Sets the name of this GetExtendedList.\n\n Name of the list # noqa: E501\n\n :param name: The name of this GetExtendedList. # noqa: E501\n :type: str\n \"\"\"\n if name is None:\n raise ValueError(\"Invalid value for `name`, must not be `None`\") # noqa: E501\n\n self._name = name\n\n @property\n def total_blacklisted(self):\n \"\"\"Gets the total_blacklisted of this GetExtendedList. # noqa: E501\n\n Number of blacklisted contacts in the list # noqa: E501\n\n :return: The total_blacklisted of this GetExtendedList. 
# noqa: E501\n :rtype: int\n \"\"\"\n return self._total_blacklisted\n\n @total_blacklisted.setter\n def total_blacklisted(self, total_blacklisted):\n \"\"\"Sets the total_blacklisted of this GetExtendedList.\n\n Number of blacklisted contacts in the list # noqa: E501\n\n :param total_blacklisted: The total_blacklisted of this GetExtendedList. # noqa: E501\n :type: int\n \"\"\"\n if total_blacklisted is None:\n raise ValueError(\"Invalid value for `total_blacklisted`, must not be `None`\") # noqa: E501\n\n self._total_blacklisted = total_blacklisted\n\n @property\n def total_subscribers(self):\n \"\"\"Gets the total_subscribers of this GetExtendedList. # noqa: E501\n\n Number of contacts in the list # noqa: E501\n\n :return: The total_subscribers of this GetExtendedList. # noqa: E501\n :rtype: int\n \"\"\"\n return self._total_subscribers\n\n @total_subscribers.setter\n def total_subscribers(self, total_subscribers):\n \"\"\"Sets the total_subscribers of this GetExtendedList.\n\n Number of contacts in the list # noqa: E501\n\n :param total_subscribers: The total_subscribers of this GetExtendedList. # noqa: E501\n :type: int\n \"\"\"\n if total_subscribers is None:\n raise ValueError(\"Invalid value for `total_subscribers`, must not be `None`\") # noqa: E501\n\n self._total_subscribers = total_subscribers\n\n @property\n def folder_id(self):\n \"\"\"Gets the folder_id of this GetExtendedList. # noqa: E501\n\n ID of the folder # noqa: E501\n\n :return: The folder_id of this GetExtendedList. # noqa: E501\n :rtype: int\n \"\"\"\n return self._folder_id\n\n @folder_id.setter\n def folder_id(self, folder_id):\n \"\"\"Sets the folder_id of this GetExtendedList.\n\n ID of the folder # noqa: E501\n\n :param folder_id: The folder_id of this GetExtendedList. # noqa: E501\n :type: int\n \"\"\"\n if folder_id is None:\n raise ValueError(\"Invalid value for `folder_id`, must not be `None`\") # noqa: E501\n\n self._folder_id = folder_id\n\n @property\n def created_at(self):\n \"\"\"Gets the created_at of this GetExtendedList. # noqa: E501\n\n Creation UTC date-time of the list (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501\n\n :return: The created_at of this GetExtendedList. # noqa: E501\n :rtype: datetime\n \"\"\"\n return self._created_at\n\n @created_at.setter\n def created_at(self, created_at):\n \"\"\"Sets the created_at of this GetExtendedList.\n\n Creation UTC date-time of the list (YYYY-MM-DDTHH:mm:ss.SSSZ) # noqa: E501\n\n :param created_at: The created_at of this GetExtendedList. # noqa: E501\n :type: datetime\n \"\"\"\n if created_at is None:\n raise ValueError(\"Invalid value for `created_at`, must not be `None`\") # noqa: E501\n\n self._created_at = created_at\n\n @property\n def campaign_stats(self):\n \"\"\"Gets the campaign_stats of this GetExtendedList. # noqa: E501\n\n\n :return: The campaign_stats of this GetExtendedList. # noqa: E501\n :rtype: list[GetExtendedListCampaignStats]\n \"\"\"\n return self._campaign_stats\n\n @campaign_stats.setter\n def campaign_stats(self, campaign_stats):\n \"\"\"Sets the campaign_stats of this GetExtendedList.\n\n\n :param campaign_stats: The campaign_stats of this GetExtendedList. # noqa: E501\n :type: list[GetExtendedListCampaignStats]\n \"\"\"\n\n self._campaign_stats = campaign_stats\n\n @property\n def dynamic_list(self):\n \"\"\"Gets the dynamic_list of this GetExtendedList. # noqa: E501\n\n Status telling if the list is dynamic or not (true=dynamic, false=not dynamic) # noqa: E501\n\n :return: The dynamic_list of this GetExtendedList. 
# noqa: E501\n        :rtype: bool\n        \"\"\"\n        return self._dynamic_list\n\n    @dynamic_list.setter\n    def dynamic_list(self, dynamic_list):\n        \"\"\"Sets the dynamic_list of this GetExtendedList.\n\n        Status telling if the list is dynamic or not (true=dynamic, false=not dynamic)  # noqa: E501\n\n        :param dynamic_list: The dynamic_list of this GetExtendedList.  # noqa: E501\n        :type: bool\n        \"\"\"\n\n        self._dynamic_list = dynamic_list\n\n    def to_dict(self):\n        \"\"\"Returns the model properties as a dict\"\"\"\n        result = {}\n\n        for attr, _ in six.iteritems(self.swagger_types):\n            value = getattr(self, attr)\n            if isinstance(value, list):\n                result[attr] = list(map(\n                    lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n                    value\n                ))\n            elif hasattr(value, \"to_dict\"):\n                result[attr] = value.to_dict()\n            elif isinstance(value, dict):\n                result[attr] = dict(map(\n                    lambda item: (item[0], item[1].to_dict())\n                    if hasattr(item[1], \"to_dict\") else item,\n                    value.items()\n                ))\n            else:\n                result[attr] = value\n        if issubclass(GetExtendedList, dict):\n            for key, value in self.items():\n                result[key] = value\n\n        return result\n\n    def to_str(self):\n        \"\"\"Returns the string representation of the model\"\"\"\n        return pprint.pformat(self.to_dict())\n\n    def __repr__(self):\n        \"\"\"For `print` and `pprint`\"\"\"\n        return self.to_str()\n\n    def __eq__(self, other):\n        \"\"\"Returns true if both objects are equal\"\"\"\n        if not isinstance(other, GetExtendedList):\n            return False\n\n        return self.__dict__ == other.__dict__\n\n    def __ne__(self, other):\n        \"\"\"Returns true if both objects are not equal\"\"\"\n        return not self == other\n", "sub_path": "sib_api_v3_sdk/models/get_extended_list.py", "file_name": "get_extended_list.py", "file_ext": "py", "file_size_in_byte": 10473, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "six.iteritems", "line_number": 277, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "74342712", "text": "# -*- coding: utf-8 -*-\nfrom pybloomberg import BloombergAPIConnector\nimport pybloomberg.queries as queries\nimport datetime as dt\n\n\nif __name__ == '__main__':\n\twith BloombergAPIConnector() as conn:\n\t\tprecos = queries.get_historical_data(conn,\n\t\t\t[\"IBM US Equity\", \"MSFT US Equity\"],\n\t\t\t\"PX_LAST\",\n\t\t\tdt.date(2015, 1, 1),\n\t\t\tdt.date(2016, 1, 1),\n\t\t\t).as_dataframe()\n\t\tprint(precos)", "sub_path": "test_as_dataframe.py", "file_name": "test_as_dataframe.py", "file_ext": "py", "file_size_in_byte": 380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pybloomberg.BloombergAPIConnector", "line_number": 8, "usage_type": "call"}, {"api_name": "pybloomberg.queries.get_historical_data", "line_number": 9, "usage_type": "call"}, {"api_name": "pybloomberg.queries", "line_number": 9, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 12, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "432483669", "text": "# CamJam Edukit 3- Robotics\n# Worksheet 4 - Driving and turning\n\nimport RPi.GPIO as GPIO # Import the GPIO Library\nimport time # import the time library\nimport sys\nfrom random import randint\n\nfrom motor import Motor\n\n#set variables for the GPIO modes\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\n\n#set variables for the GPIO motor pins\nmotorA = Motor(9,10)\nmotorB = Motor(8,7)\n\n#turn all 
motors off\ndef stopMotors():\n    motorA.stopMotors()\n    motorB.stopMotors()\n\n#turn both motors forwards (the 'fowards' spelling below matches the Motor class API) \ndef fowards (): \n    motorA.fowards()\n    motorB.fowards()\n\n#turn both motors backwards\ndef backwards ():\n    motorA.backwards()\n    motorB.backwards()\n\n#turn right\ndef right():\n    motorA.backwards()\n    motorB.fowards()\n\n#turn left\ndef left():\n    motorA.fowards()\n    motorB.backwards()\n\ndef main(argv):\n    try:\n        directions = {0: fowards,\n                      1: backwards,\n                      2: left,\n                      3: right}\n        while True:\n            dir = randint(0,3)\n            directions[dir]()\n            time.sleep(1)\n            stopMotors()\n            time.sleep(0.2)\n    except KeyboardInterrupt:\n        GPIO.cleanup()\n        quit()\n\nif __name__ == \"__main__\":\n    main(sys.argv[1:])\n\n\n", "sub_path": "4-roboCrazy.py", "file_name": "4-roboCrazy.py", "file_ext": "py", "file_size_in_byte": 1143, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "RPi.GPIO.setmode", "line_number": 12, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 12, "usage_type": "name"}, {"api_name": "RPi.GPIO.BCM", "line_number": 12, "usage_type": "attribute"}, {"api_name": "RPi.GPIO.setwarnings", "line_number": 13, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 13, "usage_type": "name"}, {"api_name": "motor.Motor", "line_number": 16, "usage_type": "call"}, {"api_name": "motor.Motor", "line_number": 17, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 51, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 53, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 55, "usage_type": "call"}, {"api_name": "RPi.GPIO.cleanup", "line_number": 57, "usage_type": "call"}, {"api_name": "RPi.GPIO", "line_number": 57, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 61, "usage_type": "attribute"}]} +{"seq_id": "582125247", "text": "import recreationdotgov.recreationlab as recreationlab\n\nfrom kivy.lang import Builder\n\nfrom kivymd.app import MDApp\nfrom kivymd.uix.floatlayout import MDFloatLayout\nfrom kivymd.uix.boxlayout import MDBoxLayout\nfrom kivymd.uix.tab import MDTabsBase\nfrom kivymd.uix.toolbar import MDToolbar\nfrom kivymd.uix.tab import MDTabs\nfrom kivymd.uix.button import MDFlatButton, MDRectangleFlatButton\nfrom kivymd.uix.picker import MDDatePicker\nfrom kivymd.uix.textfield import MDTextField\nfrom kivy.uix.widget import Widget\nfrom kivymd.uix.gridlayout import MDGridLayout\nfrom kivymd.uix.label import MDLabel\nfrom kivymd.uix.textfield import MDTextField\n\n# class Tab(MDBoxLayout, MDTabsBase):\n#     '''Class implementing content for a tab.'''\n# class Tab_Grid_not_used(MDGridLayout,MDTabsBase):\n#     '''Class implementing content for a tab.'''\n#     def __init__(self, **kwargs):\n#         super().__init__(**kwargs)\n#         # self.orientation = 'vertical'\n#         # self.padding = 2\n#         self.padding = \"5dp\"\n#         # self.size = self.parent.size\n#         # self.adaptive_size = True \n#         self.cols = 3\n#         self.rows = 3\n    \nclass Tab(MDBoxLayout,MDTabsBase):\n    '''Class implementing content for a tab.'''\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.orientation = 'vertical'\n        # self.padding = 2\n        self.padding = \"5dp\"\n        self.pos_hint = {\"center_x\": .5, \"top\": 1}\n        self.size_hint_x = .8\n        self.size_hint_y = 1\n        # self.size = self.parent.size\n        # self.adaptive_size = True \n        # self.cols = 3\n        # self.rows = 3\n\nclass SearchResult(MDTextField):\n    \"\"\"...\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        # MDTextField:\n        self.size_hint_x = .8\n        
self.size_hint_y = 1\n        self.hint_text = \"search result\"\n        self.max_height = \"200dp\"\n        self.mode = \"fill\"\n        # fill_color: 0, 0, 0, .4\n        self.multiline = True\n        self.pos_hint = {\"center_x\": .5, \"top\": 1}\n        # self.pos_hint = {\"center_x\": .5, \"center_y\": .5}\n        self.name = 'id_text_search_res'\n        # self.text = \"asdfasdfasdfasd asdf asdfasdfa asdfasdf asdf asdfasdf\"\n        \n\nclass StandardButton(MDRectangleFlatButton):\n    \"\"\"my standard buttons\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.padding = 5\n        \n\nclass StandardInputField(MDTextField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        # self.hint_text = 'mm/dd/yyyy'\n        \nclass InputCampgroundField(StandardInputField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.hint_text = 'Site ID'\n        self.name = 'id_input_site'\n        self.text = '232462'\n\nclass InputCampgroundFieldplus(StandardInputField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.hint_text = 'Site ID'\n        # self.name = 'id_input_site'\n\nclass InputDateField(StandardInputField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.hint_text = 'mm/dd/yyyy'\n\nclass InputCheckIn(InputDateField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.name = 'id_input_checkin'\n        self.text = '05/12/2021'\n\nclass InputNoOfNights(StandardInputField):\n    \"\"\"my standard textfield\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.hint_text = '# of nights'\n        self.name = 'id_input_no_of_nights'\n        self.text = '1'\n    \nclass StandardLabel(MDLabel):\n    \"\"\"my standard label\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.size_hint = (None,None)\n        self.height = 35\n    \n    \nclass SaveButton(StandardButton):\n    \"\"\"Placeholder save button; on_press currently just prints the parent size.\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.text = 'save'\n        # MDApp.get_running_app().root()\n        # self.App.get_running_app()\n        # self.bind('on_press', self.on_save)\n        \n    def on_press(self, **kwargs):\n        print(self.parent.size)\n        \n\nclass SearchButton(StandardButton):\n    \"\"\"Runs an availability query through recreationlab and shows the result message.\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.text = 'search'\n        self.padding = 5\n        # self.bind('on_press', self.on_save)\n        \n    def on_press(self, **kwargs):\n        site_id = MDApp.get_running_app().get_widget_by_name('id_input_site').text\n        checkin = MDApp.get_running_app().get_widget_by_name('id_input_checkin').text\n        no_of_nights = MDApp.get_running_app().get_widget_by_name('id_input_no_of_nights').text\n        print(f'{site_id}, {checkin}, {no_of_nights}')\n        \n        cg = recreationlab.Campground(site_id)\n        cgc = recreationlab.CampgroundCollection([cg])\n        resmessage = cgc.make_query(checkin_date=checkin, no_of_nights=int(no_of_nights))['res_msg']\n        MDApp.get_running_app().get_widget_by_name('id_text_search_res').text = resmessage\n        print(resmessage)\n\nclass SelectDateButton(StandardButton):\n    \"\"\"Opens an MDDatePicker dialog so a date can be picked.\"\"\"\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n        self.text = 'pick'\n        # self.padding = 10\n        \n    def on_press(self, **kwargs):\n        date_dialog = MDDatePicker(lambda x: x)\n        # date_dialog = MDDatePicker(lambda x: x, mode = 'range')\n        self.date_dialog = date_dialog\n        # self.date_dialog.padding = 10\n        date_dialog.bind(on_save=self.on_save, on_cancel=self.on_cancel)\n        date_dialog.open()\n    \n    \n    def 
on_save(self, *args):  # bound MDDatePicker handlers receive extra args (instance, value, date_range)\n        print(self.parent.size)\n        \n        pass\n    \n    def on_cancel(self, *args):\n        pass\n    \n# class StartDateContainer(MDBoxLayout):\n#     def __init__(self, **kwargs):\n#         super().__init__(**kwargs)\n#         # self.adaptive_width = True\n#         self.orientation = 'horizontal'\n#         self.padding = 2\n#         self.add_widget(SelectDateButton())\n#         self.add_widget(SelectDateButton())\n#         self.start_date_text = StandardField()\n#         self.start_date_text.hint_text = 'mm/dd/yyyy'\n#         self.add_widget(self.start_date_text)\n    \n\n\n\nclass Example(MDApp):\n    def build(self):\n        # box around everything\n        box_global = MDBoxLayout()\n        box_global.name = 'bababallsd'\n        box_global.orientation = 'vertical'\n        # add the toolbar\n        box_global.add_widget(MDToolbar(title = 'buba'))\n        \n        # add the tabs\n        tabs = MDTabs()\n        box_global.add_widget(tabs)\n        \n        #### The search tab\n        # tab = MDTabsBase()\n        tab = Tab()\n        tab.spacing = 10\n        tab.text = 'search'\n        tabs.add_widget(tab)\n        \n        #### # the search grid\n        grid = MDGridLayout()\n        grid.name = 'grid'\n        tab.add_widget(grid)\n        grid.cols = 3\n        grid.rows = 4\n        grid.spacing = 5\n        \n        grid.add_widget(InputCampgroundField())\n        grid.add_widget(InputCampgroundFieldplus())\n        grid.add_widget(InputCampgroundFieldplus())\n        \n        grid.add_widget(StandardLabel(text = 'Check-in'))\n        grid.add_widget(SelectDateButton())\n        grid.add_widget(InputCheckIn())\n        \n        grid.add_widget(StandardLabel(text = 'Check-out'))\n        # grid.add_widget(SelectDateButton())\n        grid.add_widget(InputNoOfNights())\n        grid.add_widget(InputDateField())\n        \n        grid.add_widget(SearchButton())\n        grid.add_widget(SaveButton())\n        \n        #### # search result\n        tab.add_widget(SearchResult())\n        # tab.padding_top = 3\n        # tab.add_widget(Widget())\n        \n        #### collections tab\n        tab = Tab()\n        tab.spacing = 10\n        tab.text = 'collections'\n        tabs.add_widget(tab)\n        \n        #### workplan\n        tab = Tab()\n        tab.spacing = 10\n        tab.text = 'workplan'\n        tabs.add_widget(tab)\n        \n        return box_global\n    \n    def get_widget_by_name(self, name):\n        widgets = []\n        for widget in self.root.walk():\n            try:\n                if widget.name == name:\n                    widgets.append(widget)\n                    # print(\"{} -> {}\".format(widget, widget.ids))\n                    # print(\"{} -> {}\".format(widget, widget.name))\n            except:\n                pass\n        if len(widgets) == 0:\n            raise KeyError(f'No widgets named {name}')\n        elif len(widgets) > 1:\n            raise KeyError(f'More than 1 widget named {name}')\n        \n        return widgets[0]\n    \n    \n    \n    def bla(self):\n        print('bla')\n    # def build(self):\n    #     # box around everything\n    #     box_global = MDBoxLayout()\n    #     box_global.orientation = 'vertical'\n    #     # add the toolbar\n    #     box_global.add_widget(MDToolbar(title = 'buba'))\n    \n    #     # add the tabs\n    #     tabs = MDTabs()\n    #     box_global.add_widget(tabs)\n    \n    #     # The search tab\n    #     tab = MDTabsBase()\n    #     tab.text = 'search'\n    #     tabs.add_widget(tab)\n    \n    #     box = MDBoxLayout()\n    #     tab.add_widget(box)\n    #     box.orientation = 'vertical'\n    \n    #     # button = MDFlatButton()\n    #     box.add_widget(StartDateContainer())\n    #     box.add_widget(SaveButton())\n    \n    #     return box_global\n    \n    \n    \n    # return Builder.load_string(KV)\n\n    # def on_start(self):\n    #     for i in range(6):\n    #         tab = Tab()\n    #         tab.text = f\"Tab {i}\"\n    #         self.root.ids.tabs.add_widget(tab)\n    #         # self.root.ids.tabs.add_widget(Tab(title=f\"Tab {i}\"))\n\n\n    # def on_tab_switch(\n    #     self, instance_tabs, instance_tab, instance_tab_label, tab_text\n    # ):\n    #     '''Called when switching tabs.\n\n    #     :type instance_tabs: <kivymd.uix.tab.MDTabs object>;\n    #     :param instance_tab: <__main__.Tab object>;\n    #     :param instance_tab_label: <kivymd.uix.tab.MDTabsLabel object>;\n    #     :param tab_text: text or name icon of tab;\n    #     '''\n    #     pass\n    #     # print( 
instance_tab.ids.label.text)\n    #     # instance_tab.ids.label.text = tab_text\n    #     # print(type(instance_tab.ids.label))\n\n\nki = Example().run()", "sub_path": "recreationdotgov/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 10378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "kivymd.uix.boxlayout.MDBoxLayout", "line_number": 33, "usage_type": "name"}, {"api_name": "kivymd.uix.tab.MDTabsBase", "line_number": 33, "usage_type": "name"}, {"api_name": "kivymd.uix.textfield.MDTextField", "line_number": 48, "usage_type": "name"}, {"api_name": "kivymd.uix.button.MDRectangleFlatButton", "line_number": 66, "usage_type": "name"}, {"api_name": "kivymd.uix.textfield.MDTextField", "line_number": 73, "usage_type": "name"}, {"api_name": "kivymd.uix.label.MDLabel", "line_number": 115, "usage_type": "name"}, {"api_name": "kivymd.app.MDApp.get_running_app", "line_number": 145, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 145, "usage_type": "name"}, {"api_name": "kivymd.app.MDApp.get_running_app", "line_number": 146, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 146, "usage_type": "name"}, {"api_name": "kivymd.app.MDApp.get_running_app", "line_number": 147, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 147, "usage_type": "name"}, {"api_name": "recreationdotgov.recreationlab.Campground", "line_number": 150, "usage_type": "call"}, {"api_name": "recreationdotgov.recreationlab", "line_number": 150, "usage_type": "name"}, {"api_name": "recreationdotgov.recreationlab.CampgroundCollection", "line_number": 151, "usage_type": "call"}, {"api_name": "recreationdotgov.recreationlab", "line_number": 151, "usage_type": "name"}, {"api_name": "kivymd.app.MDApp.get_running_app", "line_number": 153, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 153, "usage_type": "name"}, {"api_name": "kivymd.uix.picker.MDDatePicker", "line_number": 164, "usage_type": "call"}, {"api_name": "kivymd.app.MDApp", "line_number": 195, "usage_type": "name"}, {"api_name": "kivymd.uix.boxlayout.MDBoxLayout", "line_number": 198, "usage_type": "call"}, {"api_name": "kivymd.uix.toolbar.MDToolbar", "line_number": 202, "usage_type": "call"}, {"api_name": "kivymd.uix.tab.MDTabs", "line_number": 205, "usage_type": "call"}, {"api_name": "kivymd.uix.gridlayout.MDGridLayout", "line_number": 216, "usage_type": "call"}]} +{"seq_id": "317904692", "text": "#!/usr/bin/env python\n\nimport matplotlib as mpl\nmpl.use('pdf') # .pdf is way better than .png\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\ndat = np.genfromtxt('energy.dat')\nx = []\ny = []\n\nfor k in range(0,14):\n    x.append(dat[k][0])\n    y.append(dat[k][1])\n\nx = np.sort(x)\ny = np.sort(y)\n\nplt.plot(x,y,'o-')\nplt.savefig('energy.pdf')\n", "sub_path": "radiation/data/energy.py", "file_name": "energy.py", "file_ext": "py", "file_size_in_byte": 345, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.use", "line_number": 4, "usage_type": "call"}, {"api_name": "numpy.genfromtxt", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.sort", "line_number": 17, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.savefig", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}]} +{"seq_id": "135373735", "text": "\"\"\"Base Logger.\"\"\"\n# flake8: noqa\nfrom contextlib import contextmanager\nimport csv\nimport datetime\nfrom enum import Enum\nimport json\nimport os\nimport os.path as osp\nimport sys\n\nimport dateutil.tz\nimport joblib\nimport numpy as np\n\nfrom metaworlds.misc.console import colorize, mkdir_p\nfrom metaworlds.misc.logger.tabulate import tabulate\n\n\nclass TerminalTablePrinter:\n def __init__(self):\n self.headers = None\n self.tabulars = []\n\n def print_tabular(self, new_tabular):\n if self.headers is None:\n self.headers = [x[0] for x in new_tabular]\n else:\n assert len(self.headers) == len(new_tabular)\n self.tabulars.append([x[1] for x in new_tabular])\n self.refresh()\n\n def refresh(self):\n import os\n rows, columns = os.popen('stty size', 'r').read().split()\n tabulars = self.tabulars[-(int(rows) - 3):]\n sys.stdout.write(\"\\x1b[2J\\x1b[H\")\n sys.stdout.write(tabulate(tabulars, self.headers))\n sys.stdout.write(\"\\n\")\n\n\nclass MyEncoder(json.JSONEncoder):\n def default(self, o):\n if isinstance(o, type):\n return {'$class': o.__module__ + \".\" + o.__name__}\n elif isinstance(o, Enum):\n return {\n '$enum':\n o.__module__ + \".\" + o.__class__.__name__ + '.' + o.name\n }\n return json.JSONEncoder.default(self, o)\n\n\nclass Logger():\n def __init__(self):\n self._prefixes = []\n self._prefix_str = ''\n\n self._tabular_prefixes = []\n self._tabular_prefix_str = ''\n\n self._tabular = []\n\n self._text_outputs = []\n self._tabular_outputs = []\n\n self._text_fds = {}\n self._tabular_fds = {}\n self._tabular_header_written = set()\n\n self._snapshot_dir = None\n self._snapshot_mode = 'all'\n self._snapshot_gap = 1\n\n self._log_tabular_only = False\n self._header_printed = False\n\n self.table_printer = TerminalTablePrinter()\n\n def _add_output(self, file_name, arr, fds, mode='a'):\n if file_name not in arr:\n mkdir_p(os.path.dirname(file_name))\n arr.append(file_name)\n fds[file_name] = open(file_name, mode)\n\n def _remove_output(self, file_name, arr, fds):\n if file_name in arr:\n fds[file_name].close()\n del fds[file_name]\n arr.remove(file_name)\n\n def push_prefix(self, prefix):\n self._prefixes.append(prefix)\n self._prefix_str = ''.join(self._prefixes)\n\n def add_text_output(self, file_name):\n self._add_output(\n file_name, self._text_outputs, self._text_fds, mode='a')\n\n def remove_text_output(self, file_name):\n self._remove_output(file_name, self._text_outputs, self._text_fds)\n\n def add_tabular_output(self, file_name):\n self._add_output(\n file_name, self._tabular_outputs, self._tabular_fds, mode='w')\n\n def remove_tabular_output(self, file_name):\n if self._tabular_fds[file_name] in self._tabular_header_written:\n self._tabular_header_written.remove(self._tabular_fds[file_name])\n self._remove_output(file_name, self._tabular_outputs,\n self._tabular_fds)\n\n def set_snapshot_dir(self, dir_name):\n mkdir_p(dir_name)\n self._snapshot_dir = dir_name\n\n def get_snapshot_dir(self):\n return self._snapshot_dir\n\n def get_snapshot_mode(self):\n return self._snapshot_mode\n\n def set_snapshot_mode(self, mode):\n self._snapshot_mode = mode\n\n def get_snapshot_gap(self):\n return self._snapshot_gap\n\n def set_snapshot_gap(self, gap):\n self._snapshot_gap = gap\n\n def set_log_tabular_only(self, log_tabular_only):\n self._log_tabular_only = log_tabular_only\n\n 
def get_log_tabular_only(self):\n return self._log_tabular_only\n\n def log(self, s, with_prefix=True, with_timestamp=True, color=None):\n out = s\n if with_prefix:\n out = self._prefix_str + out\n # out_basic holds output with a simpler timestamp for stdout\n out_basic = out\n if with_timestamp:\n now = datetime.datetime.now(dateutil.tz.tzlocal())\n timestamp_basic = now.strftime('%Y-%m-%d %H:%M:%S')\n timestamp = now.strftime('%Y-%m-%d %H:%M:%S.%f %Z')\n out_basic = \"%s | %s\" % (timestamp_basic, out_basic)\n out = \"%s | %s\" % (timestamp, out)\n if color is not None:\n out = colorize(out, color)\n out_basic = colorize(out_basic, color)\n if not self._log_tabular_only:\n # Also log to stdout\n print(out_basic)\n for fd in list(self._text_fds.values()):\n fd.write(out + '\\n')\n fd.flush()\n sys.stdout.flush()\n\n def push_tabular_prefix(self, key):\n self._tabular_prefixes.append(key)\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n def pop_tabular_prefix(self):\n del self._tabular_prefixes[-1]\n self._tabular_prefix_str = ''.join(self._tabular_prefixes)\n\n @contextmanager\n def prefix(self, key):\n self.push_prefix(key)\n try:\n yield\n finally:\n self.pop_prefix()\n\n @contextmanager\n def tabular_prefix(self, key):\n self.push_tabular_prefix(key)\n yield\n self.pop_tabular_prefix()\n\n def record_tabular(self, key, val):\n self._tabular.append((self._tabular_prefix_str + str(key), str(val)))\n\n def dump_tabular(self, *args, **kwargs):\n wh = kwargs.pop(\"write_header\", None)\n if self._tabular:\n if self._log_tabular_only:\n self.table_printer.print_tabular(self._tabular)\n else:\n for line in tabulate(self._tabular).split('\\n'):\n self.log(line, *args, **kwargs)\n tabular_dict = dict(self._tabular)\n\n # Also write to the csv files\n # This assumes that the keys in each iteration won't change!\n for tabular_fd in list(self._tabular_fds.values()):\n writer = csv.DictWriter(\n tabular_fd, fieldnames=list(tabular_dict.keys()))\n if wh or (wh is None\n and tabular_fd not in self._tabular_header_written):\n writer.writeheader()\n self._tabular_header_written.add(tabular_fd)\n writer.writerow(tabular_dict)\n tabular_fd.flush()\n del self._tabular[:]\n\n def pop_prefix(self):\n del self._prefixes[-1]\n self._prefix_str = ''.join(self._prefixes)\n\n def save_itr_params(self, itr, params):\n if self._snapshot_dir:\n if self._snapshot_mode == 'all':\n file_name = osp.join(self._snapshot_dir, 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif self._snapshot_mode == 'last':\n # override previous params\n file_name = osp.join(self._snapshot_dir, 'params.pkl')\n joblib.dump(params, file_name, compress=3)\n elif self._snapshot_mode == \"gap\":\n if itr % self._snapshot_gap == 0:\n file_name = osp.join(self._snapshot_dir,\n 'itr_%d.pkl' % itr)\n joblib.dump(params, file_name, compress=3)\n elif self._snapshot_mode == 'none':\n pass\n else:\n raise NotImplementedError\n\n def log_parameters_lite(self, log_file, args):\n log_params = {}\n for param_name, param_value in args.__dict__.items():\n log_params[param_name] = param_value\n if args.args_data is not None:\n log_params[\"json_args\"] = dict()\n mkdir_p(os.path.dirname(log_file))\n with open(log_file, \"w\") as f:\n json.dump(log_params, f, indent=2, sort_keys=True, cls=MyEncoder)\n\n def log_variant(self, log_file, variant_data):\n mkdir_p(os.path.dirname(log_file))\n if hasattr(variant_data, \"dump\"):\n variant_data = variant_data.dump()\n with open(log_file, \"w\") as f:\n json.dump(variant_data, f, 
indent=2, sort_keys=True, cls=MyEncoder)\n\n def record_tabular_misc_stat(self, key, values, placement='back'):\n if placement == 'front':\n prefix = \"\"\n suffix = key\n else:\n prefix = key\n suffix = \"\"\n if values:\n self.record_tabular(prefix + \"Average\" + suffix,\n np.average(values))\n self.record_tabular(prefix + \"Std\" + suffix, np.std(values))\n self.record_tabular(prefix + \"Median\" + suffix, np.median(values))\n self.record_tabular(prefix + \"Min\" + suffix, np.min(values))\n self.record_tabular(prefix + \"Max\" + suffix, np.max(values))\n else:\n self.record_tabular(prefix + \"Average\" + suffix, np.nan)\n self.record_tabular(prefix + \"Std\" + suffix, np.nan)\n self.record_tabular(prefix + \"Median\" + suffix, np.nan)\n self.record_tabular(prefix + \"Min\" + suffix, np.nan)\n self.record_tabular(prefix + \"Max\" + suffix, np.nan)\n\n def reset(self):\n pass\n", "sub_path": "src/metaworlds/misc/logger/base_logger.py", "file_name": "base_logger.py", "file_ext": "py", "file_size_in_byte": 9331, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.popen", "line_number": 35, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 37, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 37, "usage_type": "attribute"}, {"api_name": "sys.stdout.write", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 38, "usage_type": "attribute"}, {"api_name": "metaworlds.misc.logger.tabulate.tabulate", "line_number": 38, "usage_type": "call"}, {"api_name": "sys.stdout.write", "line_number": 39, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 39, "usage_type": "attribute"}, {"api_name": "json.JSONEncoder", "line_number": 42, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 46, "usage_type": "argument"}, {"api_name": "json.JSONEncoder.default", "line_number": 51, "usage_type": "call"}, {"api_name": "json.JSONEncoder", "line_number": 51, "usage_type": "attribute"}, {"api_name": "metaworlds.misc.console.mkdir_p", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "metaworlds.misc.console.mkdir_p", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "attribute"}, {"api_name": "dateutil.tz.tz.tzlocal", "line_number": 145, "usage_type": "call"}, {"api_name": "dateutil.tz.tz", "line_number": 145, "usage_type": "attribute"}, {"api_name": "dateutil.tz", "line_number": 145, "usage_type": "name"}, {"api_name": "metaworlds.misc.console.colorize", "line_number": 151, "usage_type": "call"}, {"api_name": "metaworlds.misc.console.colorize", "line_number": 152, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 159, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 159, "usage_type": "attribute"}, {"api_name": "contextlib.contextmanager", "line_number": 169, "usage_type": "name"}, {"api_name": "contextlib.contextmanager", "line_number": 177, "usage_type": "name"}, {"api_name": "metaworlds.misc.logger.tabulate.tabulate", "line_number": 192, "usage_type": "call"}, {"api_name": "csv.DictWriter", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": 
"os.path", "line_number": 216, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 217, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path", "line_number": 220, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 221, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 224, "usage_type": "call"}, {"api_name": "os.path", "line_number": 224, "usage_type": "name"}, {"api_name": "joblib.dump", "line_number": 226, "usage_type": "call"}, {"api_name": "metaworlds.misc.console.mkdir_p", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 238, "usage_type": "call"}, {"api_name": "os.path", "line_number": 238, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 240, "usage_type": "call"}, {"api_name": "metaworlds.misc.console.mkdir_p", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 243, "usage_type": "call"}, {"api_name": "os.path", "line_number": 243, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.average", "line_number": 258, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 259, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 261, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 262, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 264, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 266, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 268, "usage_type": "attribute"}]} +{"seq_id": "480017751", "text": "# This script is used to extract and save pose estimation of an Aruco marker in a source video and also save a copy of the video with the Aruco marker pose axes drawn.\n# First, the script will run through the source video, attempt to detect a marker, then draw a coordinate frame on the marker.\n# Second, the script will resize the video (specified by the 'out_res' variable) and save a copy of the video in the same directory as the source video.\n# Lastly, a .csv file of time, translation, and rotation will be saved in the same directory as the source video.\n\n# Bookoodles of credit and praise go to Adriana Henriquez for figuring out how to do this stuff and writing the initial framework of the code.\n\n# Last update: 5/20/2020\n\n# PRE-USAGE, OPTIONAL:\n# Use ffmpeg to convert recorded video to acceptable size, framerate, etc., or else the processing portion of this will take 5ever.\n# ex. > $ ffmpeg -ss 5 -i VID_20200328_104239.mp4 -t 84 -an -filter:v \"scale=720:-1, transpose=2, fps=10\" output.mp4\n# ..... will trim video by starting at 5 sec (using -ss parameter) and ending 84 seconds after (using -t parameter)\n# ..... will take *.mp4 file type input (using -i parameter)\n# ..... will remove audio (using -an parameter)\n# ..... will apply multiple filters to the video (using -filter:v paramter followed by a string)\n# ........... scale the video to 720 WIDTH (using the 'scale=720:-1' option)\n# ........... rotate the video 90 degrees counter-clockwise (using the 'transpose=2' option)\n# ........... downsample the framerate to 10 fps (using the 'fps=10' option)\n# ..... 
will save it to an output file (must complete the command with a destination)\n\n# USAGE:\n# You will need to specify the camera calibration file (defaults to '../images/calib_images/calib.yaml') for whatever camera the source video was captured by.\n# If you do not have a calibration file for that camera, use the files in the '../utilities/' directory to create one.\n# You will also need to specify the location of the video from which you wish to extract Aruco pose.\n\n# To run, type the following in the unix command line:\n# >>> $ python getPoseAruco-postProcess.py [path to camera calibration file] [path to source video with aruco marker]\n\n\n# FUTURE WORK:\n# Allow other options to be specified (such as output video resolution, currently defaults to 1280x720)\n# Better filename saves (i.e. include datetime). Currently will just save as \"aruco_output.mp4\", potentially overwriting any file that currently exists\n\nimport cv2\nimport cv2.aruco as aruco\nimport datetime\nimport numpy as np\nimport sys\nimport pandas as pd\nimport os\nimport paho.mqtt.client as mqtt\n\nmarker_side_length = 0.0655 # Specify size of marker. This is a scaling/unit conversion factor. Without it, all of the measurements would just be determined in units of marker length. At 0.0655, this means a single marker is 0.0655 m per side.\n\n# Output video size\nheight = int(480)\nwidth = int(height * 16/9)\n\n# Default filenames and locations\ncalib_loc = '../images/calib_images/calib.yaml'\nvideo_loc = '../videos/'\n\n# Datetime stamp to uniquely label video filename\ndatetime_stamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\t# Format datetime stamp to label unique data files\nvalve_status = 'Off'\t# String to identify what the valve status is (will be updated later to specify commanded DIRECTION rather than valve ID)\nrecordBool = False\n\n\n# -----------------------------------------------------------------------------\n# MQTT SETUP\n# -----------------------------------------------------------------------------\n# The callback for when the client receives a CONNACK response from the server.\ndef on_connect(client, userdata, flags, rc):\n\tprint(\"Connected with result code \"+str(rc))\n\n\t# Subscribing in on_connect() means that if we lose the connection and\n\t# reconnect then subscriptions will be renewed.\n\t# client.subscribe(\"propel\")\n\t# client.subscribe(\"timedPropel\")\n\t# client.subscribe(\"CV\")\n\tclient.subscribe([(\"propel\",2), (\"timedPropel\",2), (\"CV\",2)])\n\t\n \n# The callback for when a PUBLISH message is received from the server.\ndef on_message(client, userdata, msg):\n\tglobal recordBool, valve_status\t# valve_status is module-level state, so it must be declared global here for the updates below to stick\n\tmsg.payload = msg.payload.decode(\"utf-8\")\n\tprint(\"incoming: \" + msg.topic + \" \" + str(msg.payload))\n\n\tif msg.topic == \"singleValveOn\":\n\t\tvalve_status = str(msg.payload)\n\t\n\telif msg.topic == \"singleValveOff\":\n\t\tvalve_status = 'Off'\n\t\n\tif msg.topic == \"CV\":\n\t\tif msg.payload == \"recordON\":\n\t\t\tprint(\"Go!\")\n\t\t\trecordBool = True\n\t\t\ttrack_and_record(cap, calib_loc, video_loc)\t# cap is created (and made global) by setup_recording()\n\t\tif msg.payload == \"recordOFF\":\n\t\t\tprint(\"Stop!\")\n\t\t\trecordBool = False\n\n
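# (Illustrative aside, not part of the original script: the handler above reacts to 'recordON' /\n# 'recordOFF' published on the 'CV' topic, so a minimal hand test from another process could be\n#     import paho.mqtt.publish as publish\n#     publish.single('CV', 'recordON', hostname='localhost')\n# paho.mqtt.publish.single() is a standard paho-mqtt helper; 'localhost' matches the\n# client.connect(\"localhost\", 1883, 60) call at the bottom of this file.)\n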
\ndef setup_recording(calib_loc=calib_loc, video_loc=video_loc):\n\tglobal cap, out, out_loc, cameraMatrix, distCoeffs\t# shared with track_and_record() below, so keep them module-level\n\tif os.path.isfile(calib_loc) & os.path.isdir(video_loc):\n\t\tprint('')\n\t\tprint('Readying Aruco detection on a live webcam feed using the following settings:')\n\t\tprint('\tCalibration file location: {0}'.format(os.path.abspath(calib_loc)))\n\t\tprint('\tVideo save location: {0}'.format(os.path.abspath(video_loc)))\n\t\tprint('')\n\telse:\n\t\tprint('')\n\t\tprint('*** One or more file path is invalid. ***')\n\t\tprint('\tSpecified calibration file location: {0}'.format(os.path.abspath(calib_loc)))\n\t\tprint('')\n\t\tsys.exit()\n\n\t# Import calibration items (camera matrix and distortion coefficients)\n\tprint('Importing calibration file...')\t\n\tcalib_file = cv2.FileStorage(calib_loc, cv2.FILE_STORAGE_READ)\t# Load in camera matrix of distortion correction parameters for the camera used to capture source video, for pose estimation\n\tcameraMatrix = calib_file.getNode(\"camera_matrix\").mat()\t\t# Grab the camera calibration matrix\n\tdistCoeffs = calib_file.getNode(\"dist_coeff\").mat()\t\t\t\t# Grab the camera distortion matrix\n\tprint('Done!')\n\tprint('')\n\n\t# Here we instantiate a capture object that we'll pass into the Aruco detection algorithm. Also we grab the source FPS and resolution for reference.\n\tcap = cv2.VideoCapture(0)\t\t\t\t\t\t\t\t\t\t# Instantiate video capture object 'cap' using DEVICE 0\n\tfps = cap.get(cv2.CAP_PROP_FPS)\t\t\t\t\t\t\t\t\t# Grab the FPS of the source video so you can properly calculate elapsed process time\n\tsource_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) \t\t\t# Grab the source video width\n\tsource_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\t\t\t# Grab the source video height\n\tprint('Source resolution: {0} x {1} px'.format(source_width, source_height))\n\tprint('Source fps: {}'.format(fps))\n\tprint('')\n\n\t# Here we define the codec and create a VideoWriter object to resave the video (with a coordinate frame drawn on the Aruco marker and reduced in size).\n\tfourcc = cv2.VideoWriter_fourcc(*\"XVID\")\t\t\t\t\t\t# Specify the codec used to write the new video\n\tout_loc = video_loc + datetime_stamp + '_record.avi'\t\t\t# Save the video in the same directory as the source video, named with the datetime stamp plus '_record.avi'\n\tout_res = (source_width, source_height)\t\t\t\t\t\t\t\t\t\t# Output video resolution (NOTE: This is NOT the video that the Aruco marker will be tracked from. The marker will still be tracked from the source video--this is the output that the coordinate axes are drawn on.)\n\tout = cv2.VideoWriter(out_loc, fourcc, 5, out_res)\t\t\t# Instantiate an object of the output video (to have a coordinate frame drawn on the Aruco marker and resized)\n\n\n\tprint('')\n\tprint('Ready to record and process live video. 
Processed video will be saved to {0}'.format(os.path.abspath(out_loc)))\n\tprint(\"Send message 'recordON' to topic 'CV' to begin.\")\n\tprint(\"Send message 'recordOFF' to topic 'CV' to stop.\")\n\tprint('')\n\n\n# -----------------------------------------------------------------------------\n# CV LOOP\n# -----------------------------------------------------------------------------\ndef track_and_record(capture, calib_loc=calib_loc, video_loc=video_loc):\n\tprint(\"Getting ready....recordBool is \" + str(recordBool))\n\tstart_time = datetime.datetime.utcnow().timestamp()\n\n\t# Set up the data storage variables to be appended or updated through the algorithm's loop.\n\tpose_transformation = []\t\t\t\t\t\t\t\t\t\t# This is the list which will store the pose transformation values (3x translation, 3x rotation)\n\n\twhile(recordBool):\t\t\t\t\t\t\t\t\t\t\t\t# while(True) means \"run as fast as you can\".\n\t\tprint(\"Going!\")\n\t\t# Capture frame-by-frame\n\t\ttry:\n\t\t\tret, frame = capture.read()\t\t\t\t\t\t\t\t\t# Read the next frame in the buffer and return it as an object\n\t\texcept:\n\t\t\tprint(\"Couldn't read\")\n\t\t\tbreak\n\n\t\t# if (not ret):\t\t\t\t\t\t\t\t\t\t\t# If cap.read() doesn't return anything (i.e. if you've stopped recording)\n\t\t# \tprint(\"\\\"ret\\\" is FALSE\")\n\t\t# \tbreak\t\t\t\t\t\t\t\t\t\t\t\t# Kill the loop\n\n\t\tblur = cv2.GaussianBlur(frame, (11,11), 0)\t\t\t\t# As part of the Aruco marker detection algorithm, we blur the frame\n\t\tgray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)\t\t\t# Next, we make the frame grayscale\n\t\taruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250) \t# Define the shape of the Aruco marker we are trying to detect (6X6_250 is very common)\n\t\tparameters = aruco.DetectorParameters_create()\t\t\t# Not sure what this step does but Adriana put it in and I trust her\n\n\t\ttry:\n\t\t\tprint(\"Trying to detect corners...\")\n\t\t\t# Here is the function that does all the hard work of actually detecting markers\n\t\t\tcorners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\t# List of ids and the corners belonging to each id\n\n\t\t\t# This is the function that highlights a detected marker(s)\n\t\t\t# The inputs are the image ('frame'), the detected corners ('corners'), and the ids of the detected markers ('ids')\n\t\t\t# This function is only provided for visualization and its use can be omitted without repercussion\n\t\t\t# aruco.drawDetectedMarkers(frame, corners, ids)\n\t\t\t\n\t\t\t# This is the part where we actually estimate pose of each marker\n\t\t\t# We need to use the camera calibration information in order to correctly estimate the pose after correcting for camera distortion\n\t\t\t# The camera pose with respect to a marker is the 3d transformation FROM the marker coordinate system TO the camera coordinate system\n\t\t\t# It is specified by a rotation and a translation vector (rvec and tvec, respectively)\n\t\t\t\t# The 'corners' parameter is the vector of marker corners returned by the detectMarkers() function.\n\t\t\t\t# The second parameter is the size of the marker side in meters or in any other unit. Note that the translation vectors of the estimated poses will be in the same unit\n\t\t\t\t# cameraMatrix and distCoeffs are the camera calibration parameters that need to be known prior to executing this function.\n\t\t\t\t# rvecs and tvecs are the rotation and translation vectors respectively.\n\t\t\t\t# The marker coordinate axes are centered on the middle of the marker, with the Z axis perpendicular to the marker plane.\n\t\t\trvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners, marker_side_length, cameraMatrix, distCoeffs)\n
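\t\t\t# (Illustrative aside, not in the original script: rvec[0][0] here is a Rodrigues axis-angle\n\t\t\t# vector; if a full 3x3 rotation matrix were ever needed, standard OpenCV can convert it via\n\t\t\t#     R, _ = cv2.Rodrigues(rvec[0][0])\n\t\t\t# nothing below depends on this; the raw rvec/tvec components are simply logged to the csv.)\n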
\t\t\t\n\t\t\t# The aruco module provides a function to draw the coordinate axes onto the image, so pose estimation can be visually verified:\n\t\t\t# Image is the input/output image where the axis will be drawn (it will normally be the same image where the markers were detected).\n\t\t\t# cameraMatrix and distCoeffs are the camera calibration parameters.\n\t\t\t# rvec and tvec are the pose parameters whose axis is to be drawn.\n\t\t\t# The last parameter is the length of the axis, in the same unit as tvec (usually meters)\n\t\t\t# aruco.drawAxis(frame, cameraMatrix, distCoeffs, rvec, tvec, 0.5*marker_side_length) #Draw Axis\n\t\t\t\n\t\t\t# Append frame_time, rvecs, tvecs, and valve_status to the pose_transformation list, to be saved to a csv after the loop is complete\n\t\t\ttime_now = datetime.datetime.utcnow().timestamp()\n\t\t\tframe_time = time_now - start_time\n\t\t\tpose_transformation.append([frame_time, rvec[0][0][0], rvec[0][0][1], rvec[0][0][2], tvec[0][0][0], tvec[0][0][1], tvec[0][0][2], valve_status])\n\t\t\n\n\t\texcept:\n\t\t\t# If any of the functions in the loop throw an error, just write NaNs to this data point and move on\n\t\t\tprint(\"Couldn't detect corners!\")\n\t\t\ttime_now = datetime.datetime.utcnow().timestamp()\n\t\t\tframe_time = time_now - start_time\n\t\t\tpose_transformation.append([frame_time, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, valve_status])\n\t\t\tpass\n\t\t\t\n\t\t# Display the captured frame with reduced size (in case your source video has larger resolution than your monitor)\n#\t\tb = cv2.resize(frame, out_res, fx=0, fy=0, interpolation=cv2.INTER_CUBIC)\t# Resize the frame from the source video and instantiate it as a new object 'b'\n#\t\tcv2.namedWindow('Detected Aruco Markers', cv2.WINDOW_AUTOSIZE)\t\t\t\t# Create a window to display the modified frames in\n#\t\tcv2.resizeWindow('Detected Aruco Markers', out_res)\t\t\t\t\t\t\t# Resize the window by explicitly defining its resolution (without this it MAY appear teeny-tiny for no apparent reason)\n#\t\tcv2.imshow('Detected Aruco Markers', frame)\t\t\t\t\t\t\t\t\t\t# SHOW ME WHAT YOU GOT.\n\n\t\t# ...and write the result to the output video object 'out'.\n\t\t# Just to clarify: 'out' is a VideoWriter object. A single frame object 'b' is written to the VideoWriter object 'out'.\n#\t\tout.write(frame)\n\n\t\t# Press 'q' to quit early. 
Don't worry, the video has already been written to 'out' BUT THE DATA FILE HASN'T\n\t\tif cv2.waitKey(1) & 0xFF == ord('q'):\n\t\t\tprint('')\n\t\t\tbreak\n\n\tprint(\"Stopping!\")\n\t# When everything done, release the capture and close it all out\n\theaders = ['Time (s)', 'r1', 'r2', 'r3', 't1', 't2', 't3', 'valveStatus']\n\tdf = pd.DataFrame(pose_transformation, columns=headers)\n\tdf.to_csv(video_loc + datetime_stamp + '_datafile.csv', index=False)\n\tcapture.release()\n\tout.release()\n\tcv2.destroyAllWindows()\n\n\tprint('')\n\tprint('')\n\tprint('Process complete.')\n\tprint('Video saved to {0}'.format(out_loc))\n\n\nif __name__ == \"__main__\":\n\ttry:\n\t\t# Specify location of calibration file you wish to use. It will save calibration data in the same location.\n\t\tcalib_loc = sys.argv[1]\n\t\t# Specify location of video file containing the aruco marker which you wish to extract the pose of.\n\t\tvideo_loc = sys.argv[2]\n\texcept:\n\t\tpass\n\n\t# Set up MQTT client\n\tclient = mqtt.Client(\"computervision\", protocol=mqtt.MQTTv31)\n\tclient.on_connect = on_connect\n\tclient.on_message = on_message\n\n\t# Connect!\n\tclient.connect(\"localhost\", 1883, 60)\t# (host, port, keepalive)\n\tsetup_recording(calib_loc, video_loc)\n\n\twhile True:\n\t\tclient.loop()", "sub_path": "aruco/getPoseAruco_toBeInSitu_MQTT.py", "file_name": "getPoseAruco_toBeInSitu_MQTT.py", "file_ext": "py", "file_size_in_byte": 14223, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 56, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 56, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path", "line_number": 99, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 99, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 108, "usage_type": "call"}, {"api_name": "os.path", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.FileStorage", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.FILE_STORAGE_READ", "line_number": 114, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.CAP_PROP_FPS", "line_number": 122, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_WIDTH", "line_number": 123, "usage_type": "attribute"}, {"api_name": "cv2.CAP_PROP_FRAME_HEIGHT", "line_number": 124, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 130, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 133, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path", "line_number": 137, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 148, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 148, "usage_type": "attribute"}, {"api_name": "cv2.GaussianBlur", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 167, "usage_type": "call"}, 
{"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 167, "usage_type": "attribute"}, {"api_name": "cv2.aruco.Dictionary_get", "line_number": 168, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 168, "usage_type": "name"}, {"api_name": "cv2.aruco.DICT_6X6_250", "line_number": 168, "usage_type": "attribute"}, {"api_name": "cv2.aruco.DetectorParameters_create", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 169, "usage_type": "name"}, {"api_name": "cv2.aruco.detectMarkers", "line_number": 174, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 174, "usage_type": "name"}, {"api_name": "cv2.aruco.estimatePoseSingleMarkers", "line_number": 190, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 190, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 200, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 208, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 208, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 210, "usage_type": "attribute"}, {"api_name": "cv2.waitKey", "line_number": 224, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 231, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 235, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 246, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 248, "usage_type": "attribute"}, {"api_name": "paho.mqtt.client.Client", "line_number": 253, "usage_type": "call"}, {"api_name": "paho.mqtt.client", "line_number": 253, "usage_type": "name"}, {"api_name": "paho.mqtt.client.MQTTv31", "line_number": 253, "usage_type": "attribute"}]} +{"seq_id": "121306669", "text": "''' October 10, 2016\nUsing real tuning curves, generating fake spikes with actual angle data using possion\nassumption and running the radius scaling analysis on the resultant spike counts'''\n\nfrom __future__ import division\nimport numpy as np\nimport sys\nimport matplotlib.pyplot as plt\nimport os\n\n\ngen_fn_dir = os.path.abspath('../..') + '/shared_scripts'\nsys.path.append(gen_fn_dir)\nfrom general_file_fns import load_file, save_file\nfrom data_gen_class import generate\nfrom binned_spikes_class import binned_spikes\n\nrad_scal_dir = os.path.abspath('../') + '/dim_estimation'\nsys.path.append(rad_scal_dir)\nfrom radius_scaling import get_nbrs\nfrom plot_nbrs_v_scaling import fit_piecewise, piecewise_linear\n\nwith open(os.path.expanduser('~') + '/path_to_hd_data.txt', \"r\") as myfile:\n data_path = myfile.readlines()[0]\n data_path = data_path.rstrip()\nTC_data_path = data_path + 'analyses/2016_04_tuning_curves/'\n\nsession_id = 'Mouse12-120808'\n\n# loading the tuning curve data for each cell\nTC_data = load_file(TC_data_path + session_id + '_bins_30.p')\ntuning_curves = TC_data['tuning_curve_data']\n\n# replacing the keys in the tuning curves w/ sth random; otherwise causes error in the plotting\n# method later\nfor i, key in enumerate(sorted(tuning_curves.keys())):\n tuning_curves[i / 1.2] = tuning_curves.pop(key)\ntime_bin = 0.5\nstate = 'Wake'\nbinned_data = binned_spikes(session_id, time_bin)\n\n# loading the angles data for the session\nangle_list = binned_data.concat_intvl_matrix(state)[1]\n\n# instance of the TC generator\ngen_data = generate(angle_list=angle_list, tuning_curves=tuning_curves)\n\n# plot all the generated tuning 
curves\ngen_data.plot_tuning_curves()\nplt.show()\n\n# generating cell responses given the angle data and the tuning curves\n# a) Poisson assumption\ncell_responses = gen_data.counts_from_tcs()\ncount_matrix = np.array([cell_responses[x] for x in sorted(cell_responses)]).T\n\n# b) Gaussian assumption\nstd_dev = 1\ncell_responses_gaus = gen_data.counts_from_tcs_gaussian(std_dev)\ncount_matrix_gaus = np.array([cell_responses_gaus[x] for x in sorted(cell_responses_gaus)]).T\n\n# running the radius scaling analysis with max 70 nbrs\nmean_nbrs, std_nbrs, rad_list = get_nbrs(count_matrix, 1, 70, 1)\nmean_nbrs_gaus, std_nbrs_gaus, rad_list_gaus = get_nbrs(count_matrix_gaus, 1, 70, 1)\n\n# log transform of the radius list and the mean # nbrs\nl_rad_list = np.log(rad_list)\nl_mean_nbrs = np.log(mean_nbrs)\n\nl_rad_list_gaus = np.log(rad_list_gaus)\nl_mean_nbrs_gaus = np.log(mean_nbrs_gaus)\n\n# fitting a piecewise line to the log transform of the rad_list and mean_nbrs\nx1, y1, xd, p = fit_piecewise(l_rad_list, l_mean_nbrs)\nx2, y2, xe, p1 = fit_piecewise(l_rad_list_gaus, l_mean_nbrs_gaus)\n\nplt.figure()\nplt.suptitle('%s: Poisson assumption' % session_id, fontsize=30)\nplt.plot(x1, y1, \"o\", markersize=12)\nplt.plot(xd, piecewise_linear(xd, *p), lw=6)\nplt.title('State: %s; bin size: %ss; slope: %.2f; slope: %.2f' %\n (state, time_bin, p[2], p[3]), fontsize=25)\nplt.xlabel('log Radius', fontsize=25)\nplt.ylabel('log Number of neighbors', fontsize=25)\nplt.xlim(1.5, 4.5)\nfigure = plt.gcf()\nfigure.set_size_inches(15, 10)\nplt.savefig('/Users/birajpandey/Desktop/Real_TCs_poisson.png', format='png', dpi=100)\n\nplt.figure()\nplt.suptitle('%s: Gaussian assumption w/ noise= %.2f' % (session_id, std_dev), fontsize=30)\nplt.plot(x2, y2, \"o\", markersize=12)\nplt.plot(xe, piecewise_linear(xe, *p1), lw=6)\nplt.title('State: %s; bin size: %ss; slope: %.2f; slope: %.2f' %\n (state, time_bin, p1[2], p1[3]), fontsize=25)\nplt.xlabel('log Radius', fontsize=25)\nplt.ylabel('log Number of neighbors', fontsize=25)\nplt.xlim(1.5, 4.5)\nfigure = plt.gcf()\nfigure.set_size_inches(15, 10)\nplt.show()\nplt.savefig('/Users/birajpandey/Desktop/Real_TCs_gaussian.png', format='png', dpi=100)\n", "sub_path": "dim_estimation/radius_scaling/rs_real_TC.py", "file_name": "rs_real_TC.py", "file_ext": "py", "file_size_in_byte": 3793, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.abspath", "line_number": 12, "usage_type": "call"}, {"api_name": "os.path", "line_number": 12, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 19, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.expanduser", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "general_file_fns.load_file", "line_number": 31, "usage_type": "call"}, {"api_name": "binned_spikes_class.binned_spikes", "line_number": 40, "usage_type": "call"}, {"api_name": "data_gen_class.generate", "line_number": 46, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 50, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 60, "usage_type": "call"}, {"api_name": "radius_scaling.get_nbrs", "line_number": 63, "usage_type": "call"}, {"api_name": "radius_scaling.get_nbrs", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 71, "usage_type": "call"}, {"api_name": "plot_nbrs_v_scaling.fit_piecewise", "line_number": 74, "usage_type": "call"}, {"api_name": "plot_nbrs_v_scaling.fit_piecewise", "line_number": 75, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 78, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 78, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 79, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 79, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 80, "usage_type": "name"}, {"api_name": "plot_nbrs_v_scaling.piecewise_linear", "line_number": 80, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 86, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 86, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 88, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 88, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.suptitle", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "plot_nbrs_v_scaling.piecewise_linear", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.title", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 97, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 97, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 102, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 102, "usage_type": "name"}]} +{"seq_id": "329898032", "text": "#!/usr/bin/env python\n# encoding: utf-8\n\n# The MIT License (MIT)\n\n# Copyright (c) 2014-2015 CNRS\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# AUTHORS\n# Hervé BREDIN - http://herve.niderb.fr\n\nfrom __future__ import print_function\n\nimport pytest\nfrom pyannote.core import Segment\nfrom pyannote.parser import SRTParser\nimport numpy as np\nimport tempfile\nimport os\n\nSAMPLE = \"\"\"1\n00:00:01,240 --> 00:00:03,834\nIf a photon is directed through a plane\nwith two slits in it...\n\n\n\n2\n00:00:04,000 --> 00:00:05,149\n...and either is observed...\n\n\n\n3\n00:00:05,319 --> 00:00:07,549\n...it will not go through both.\nIf unobserved. it will.\n\"\"\"\n\n\n@pytest.fixture\ndef sample(request):\n\n _, filename = tempfile.mkstemp()\n with open(filename, 'w') as f:\n f.write(SAMPLE)\n\n def delete():\n os.remove(filename)\n request.addfinalizer(delete)\n\n return filename\n\n\ndef test_load(sample):\n parser = SRTParser()\n transcriptions = parser.read(sample)\n subtitles = transcriptions()\n assert list(subtitles.ordered_edges_iter(data=True)) == [\n (-np.inf, 1.240, {}),\n (1.240, 3.834, {'subtitle': 'If a photon is directed through a plane with two slits in it...'}),\n (3.834, 4.000, {}, ),\n (4.000, 5.149, {'subtitle': '...and either is observed...'}),\n (5.149, 5.319, {}),\n (5.319, 7.549, {'subtitle': '...it will not go through both. If unobserved. 
it will.'}),\n (7.549, np.inf, {})\n ]\n", "sub_path": "tests/test_srt.py", "file_name": "test_srt.py", "file_ext": "py", "file_size_in_byte": 2459, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tempfile.mkstemp", "line_number": 61, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 66, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 58, "usage_type": "attribute"}, {"api_name": "pyannote.parser.SRTParser", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 77, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "370278514", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('engine', '0011_auto_20160128_2100'),\n ]\n\n operations = [\n migrations.RemoveField(\n model_name='historicalscoringframework',\n name='documentation',\n ),\n migrations.RemoveField(\n model_name='scoringframework',\n name='documentation',\n ),\n migrations.AddField(\n model_name='historicalscoringscenario',\n name='documentation_markdown',\n field=models.TextField(null=True, blank=True),\n ),\n migrations.AddField(\n model_name='scoringscenario',\n name='documentation_markdown',\n field=models.TextField(null=True, blank=True),\n ),\n ]\n", "sub_path": "dss/apps/engine/migrations/0012_auto_20160129_2039.py", "file_name": "0012_auto_20160129_2039.py", "file_ext": "py", "file_size_in_byte": 876, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.migrations.RemoveField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 25, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "519668669", "text": "import foolbox\nfrom foolbox.models import KerasModel\nfrom foolbox.attacks import BoundaryAttack\nfrom foolbox.criteria import TargetClassProbability, TopKMisclassification, TargetClass\nimport numpy as np\nimport keras\nfrom keras.preprocessing import image\nfrom keras.applications.densenet import DenseNet121\nfrom keras.applications.densenet import decode_predictions\n\n# Load pretrained DenseNet\nkeras.backend.set_learning_phase(0)\nkmodel = DenseNet121(weights='imagenet')\n\n# Load two images. 
The cat image is the original image\n# and the dog image is used to initialize a targeted\n# attack.\ndog_img = image.load_img('dog.jpg', target_size=(224, 224))\ncat_img = image.load_img('cat.jpg', target_size=(224, 224))\ndog_img = image.img_to_array(dog_img)\ncat_img = image.img_to_array(cat_img)\ncat_img = 2.0 * cat_img / 255.0 - 1\ndog_img = 2.0 * dog_img / 255.0 - 1\n\ndog_x = np.expand_dims(dog_img, axis=0)\ncat_x = np.expand_dims(cat_img, axis=0)\n\n# Build a foolbox model\nfmodel = KerasModel(kmodel, bounds=(-1, 1))\n\n# label of the target class\npreds = kmodel.predict(dog_x)\ndog_label=np.argmax(preds)\n\n# label of the original class\npreds = kmodel.predict(cat_x)\ncat_label=np.argmax(preds)\n\ncriterion_1 = TopKMisclassification(k=5)\ncriterion_2 = TargetClass(dog_label)\ncriterion_3 = TargetClassProbability(dog_label, p=0.5)\ncriterion = criterion_1 & criterion_2 & criterion_3\n\nattack = BoundaryAttack(model=fmodel,\n criterion=criterion)\n\niteration_size = 1000\nglobal_iterations = 0\n# Run boundary attack to generate an adversarial example\nadversarial = attack(cat_img,\n label=cat_label,\n unpack=False,\n iterations=iteration_size,\n starting_point=dog_img,\n log_every_n_steps=10,\n verbose=True)\nglobal_iterations += iteration_size\n\nnp.save('adversarial_image_{0}'.format(global_iterations), adversarial.image)\n\nfor i in range(10):\n adversarial = attack(adversarial,\n unpack=False,\n iterations=iteration_size,\n verbose=True)\n global_iterations += iteration_size\n np.save('adversarial_image_{0}'.format(global_iterations), adversarial.image)\n\n # show results\n print(np.argmax(fmodel.predictions(adversarial.image)))\n print(fmodel.predictions(foolbox.utils.softmax(adversarial.image))[dog_label])\n preds = kmodel.predict(adversarial.image.copy())\n print(\"Top 5 predictions (adversarial): \", decode_predictions(preds, top=5))", "sub_path": "tong/test_boundary_attack.py", "file_name": "test_boundary_attack.py", "file_ext": "py", "file_size_in_byte": 2544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "keras.backend.set_learning_phase", "line_number": 12, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 12, "usage_type": "attribute"}, {"api_name": "keras.applications.densenet.DenseNet121", "line_number": 13, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 18, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 18, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.load_img", "line_number": 19, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 19, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 20, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 20, "usage_type": "name"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 21, "usage_type": "call"}, {"api_name": "keras.preprocessing.image", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 26, "usage_type": "call"}, {"api_name": "foolbox.models.KerasModel", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 37, "usage_type": "call"}, {"api_name": "foolbox.criteria.TopKMisclassification", "line_number": 39, 
"usage_type": "call"}, {"api_name": "foolbox.criteria.TargetClass", "line_number": 40, "usage_type": "call"}, {"api_name": "foolbox.criteria.TargetClassProbability", "line_number": 41, "usage_type": "call"}, {"api_name": "foolbox.attacks.BoundaryAttack", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 70, "usage_type": "call"}, {"api_name": "foolbox.utils.softmax", "line_number": 71, "usage_type": "call"}, {"api_name": "foolbox.utils", "line_number": 71, "usage_type": "attribute"}, {"api_name": "keras.applications.densenet.decode_predictions", "line_number": 73, "usage_type": "call"}]} +{"seq_id": "499750418", "text": "import numpy as np\n\n# Used for setting up data\nimport cv2\nfrom keras.preprocessing.image import img_to_array\nfrom keras.preprocessing.image import array_to_img\nfrom keras.utils import to_categorical\nfrom imutils import paths\n\n# Used for build\nfrom keras.models import Sequential\nfrom keras.layers.convolutional import Conv2D\nfrom keras.layers.convolutional import MaxPooling2D\nfrom keras.layers.core import Activation\nfrom keras.layers.core import Flatten\nfrom keras.layers.core import Dense\nfrom keras import backend as K\n\n# Used for aug data gen\nfrom keras.preprocessing.image import ImageDataGenerator\n\n# Used for training\nfrom keras.optimizers import Adam\n\n# Used for predictions\nfrom keras.models import load_model\n\n# Used for live predictions\nimport time\nfrom PIL import ImageGrab\n\n# Used for GUI\nimport tkinter\nfrom PIL import ImageTk\nfrom PIL import Image\n\n# Used for Generating/Labeling Data\nfrom shutil import copyfile\nimport os\nfrom random import randint\n\nclass LeNet:\n @staticmethod\n def build(width, height, depth, classes):\n # initialize the model\n model = Sequential()\n inputShape = (height, width, depth)\n\n # if we are using \"channels first\", update the input shape\n if (K.image_data_format() == \"channels_first\"):\n inputShape = (depth, height, width)\n\n # first set of CONV => RELU => POOL layers\n model.add(Conv2D(20, (5, 5), padding=\"same\", input_shape=inputShape))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n # second set of CONV => RELU => POOL layers\n model.add(Conv2D(50, (5, 5), padding=\"same\"))\n model.add(Activation(\"relu\"))\n model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))\n\n\n\n # first (and only) set of FC => RELU layers\n model.add(Flatten())\n model.add(Dense(500))\n model.add(Activation(\"relu\"))\n\n # softmax classifier\n model.add(Dense(classes))\n model.add(Activation(\"softmax\"))\n\n # return the constructed network architecture\n return model\n\n# CNN 1\n\ndef loadTrainingImages1():\n x_train = np.zeros((87, 32, 32, 3))\n\n imagePaths = sorted(list(paths.list_images(\"trainData/\")))\n\n for i in range(len(imagePaths)):\n\n img = cv2.imread(imagePaths[i])\n img = cv2.resize(img, (32, 32))\n img = img_to_array(img)\n x_train[i] = img\n\n y_train = np.zeros(len(x_train))\n\n for i in range(len(y_train)):\n y_train[i] = i\n\n return x_train, y_train\n\ndef loadTestingImages1():\n\n img = cv2.imread(\"testCNN.png\")\n arr = img_to_array(img)\n cv2.imwrite(\"croppped.png\", arr[58:169, 702:1215])\n\n arr = arr[58:169, 702:1215]\n\n cv2.imwrite(\"testData/output1.png\", arr[30:97, 16:70])\n\n cv2.imwrite(\"testData/output2.png\", arr[30:97, 77:131])\n\n 
cv2.imwrite(\"testData/output3.png\", arr[30:97, 138:192])\n\n cv2.imwrite(\"testData/output4.png\", arr[30:97, 199:253])\n\n cv2.imwrite(\"testData/output5.png\", arr[30:97, 260:314])\n\n cv2.imwrite(\"testData/output6.png\", arr[30:97, 321:375])\n\n cv2.imwrite(\"testData/output7.png\", arr[30:97, 382:436])\n\n cv2.imwrite(\"testData/output8.png\", arr[30:97, 443:497])\n\ndef trainModel1():\n EPOCHS = 150\n INIT_LR = 1e-3\n BS = 8\n\n print(\"[INFO] Loading Images\")\n x_train, y_train = loadTrainingImages1()\n #x_test, y_test = loadTestingImages()\n print(x_train.shape)\n print(y_train.shape)\n #print(x_test.shape)\n #print(y_test.shape)\n print(\"[INFO] Images have been loaded.\")\n\n x_train /= 255\n #x_test /= 255\n\n y_train = to_categorical(y_train, num_classes=87)\n #y_test = to_categorical(y_test, num_classes=87)\n\n\n aug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2)\n\n print(\"[INFO] compiling model...\")\n model = LeNet.build(width=32, height=32, depth=3, classes=87)\n opt = Adam(lr=INIT_LR, decay=INIT_LR/EPOCHS)\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n\n print(\"[INFO] training network...\")\n H = model.fit_generator(aug.flow(x_train, y_train, batch_size=BS), \n validation_data=(x_train, y_train), steps_per_epoch=len(x_train) // BS,\n epochs=EPOCHS, verbose=1)\n\n print(\"[INFO] serializing network...\")\n model.save(\"testNet.model\")\n\ndef modelPredicts1():\n\n loadTestingImages1()\n\n imageNames = sorted(list(paths.list_images(\"trainData/\")))\n\n for i in range(len(imageNames)):\n imageNames[i] = imageNames[i][imageNames[i].find('/')+1:-4]\n\n print(\"[INFO] loading network...\")\n model = load_model(\"testNet.model\")\n\n for i in range(8):\n img = cv2.imread(\"testData/output\" + str(i+1) + \".png\")\n orig = img.copy()\n\n img = cv2.resize(img, (32, 32))\n img = img.astype(\"float\")/255.0\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n\n output = model.predict(img)[0]\n label = output.argmax()\n\n print(output)\n print(label)\n\n label = \"{}: {:.2f}%\".format(imageNames[label], output[label] * 100)\n\n print(label)\n\n orig = cv2.resize(orig, (400, 400))\n cv2.putText(orig, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.imshow(\"Output\", orig)\n cv2.waitKey(0)\n\ndef liveModelPredicts1():\n\n imagePaths = sorted(list(paths.list_images(\"trainData/\")))\n imageNames = sorted(list(paths.list_images(\"trainData/\")))\n\n for i in range(len(imageNames)):\n imageNames[i] = imageNames[i][imageNames[i].find('/')+1:-4]\n\n print(\"[INFO] loading network...\")\n model = load_model(\"testNet.model\")\n\n opponentCards = ['MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard']\n tempOpponentCards = ['MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard']\n\n root = tkinter.Tk()\n myFrame = tkinter.LabelFrame(root, text=\"Opponent's Cards\", labelanchor=\"n\")\n myFrame.pack()\n\n print(\"[INFO] Type anything and press enter to begin...\")\n input()\n\n startTime = time.time()\n\n while (True):\n\n if (time.time()-startTime > 1):\n\n im = ImageGrab.grab()\n im.save(\"testCNN.png\")\n loadTestingImages1()\n\n for i in range(8):\n\n if (opponentCards[i] != \"MysteryCard\"):\n continue\n\n img = cv2.imread(\"testData/output\" + str(i+1) + \".png\")\n img = cv2.resize(img, (32, 32))\n img = img.astype(\"float\")/255.0\n img = 
img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n output = model.predict(img)[0]\n label = output.argmax()\n\n if (imageNames[label] == \"MysteryCard\"):\n continue\n\n elif (tempOpponentCards[i] == imageNames[label]):\n opponentCards[i] = imageNames[label]\n\n img = Image.open(imagePaths[label])\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img, borderwidth=10)\n panel.image = img\n panel.grid(row=0, column=i)\n root.update()\n\n else:\n tempOpponentCards[i] = imageNames[label]\n\n labelString = \"{}: {:.2f}%\".format(imageNames[label], output[label] * 100)\n\n print(labelString)\n\n print(\"--------Opponent's Deck--------\")\n print(opponentCards)\n print()\n print()\n\n startTime = time.time()\n\n# CNN 2\n\ndef generateTrainingImages2():\n\n currentNumOfData = len(sorted(list(paths.list_images(\"generatedData/\"))))\n\n print(\"[INFO] Type anything and press enter to begin...\")\n input()\n\n startTime = time.time()\n\n i = 0\n\n while (True):\n\n if (time.time()-startTime > 1):\n print(\"--------Captured Data--------\")\n\n im = ImageGrab.grab()\n im.save(\"generatedData/input\" + str(i+1+currentNumOfData) + \".png\")\n i += 1\n\n startTime = time.time()\n\ndef labelTrainingData2():\n\n imagePaths = sorted(list(paths.list_images(\"generatedData/\")))\n currentNumOfLabeledData = len(sorted(list(paths.list_images(\"trainData2/\"))))\n\n root = tkinter.Tk()\n myFrame = tkinter.LabelFrame(root, text=\"Unlabeled Data\", labelanchor=\"n\")\n myFrame.pack()\n\n labeledCount = 0\n\n for i in range(len(imagePaths)):\n img = Image.open(imagePaths[i])\n img.thumbnail((1500, 1500), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img)\n panel.image = img\n panel.grid(row=0, column=0)\n root.update()\n\n label = input()\n\n if (label != 'e'):\n copyfile(imagePaths[i], \"trainData2/\"+label+\"input\"+str(labeledCount+currentNumOfLabeledData)+\".png\")\n labeledCount += 1\n\n os.remove(imagePaths[i])\n\ndef loadTrainingImages2():\n\n imagePaths = sorted(list(paths.list_images(\"trainData2/\")))\n x_train = np.zeros((len(imagePaths)*2, 28, 28, 3))\n\n j = 0\n\n for i in range(len(imagePaths)):\n\n # Positive Label\n\n img = cv2.imread(imagePaths[i])\n arr = img_to_array(img)\n #cv2.imwrite(\"croppped.png\", arr[58:88, 702:1215])\n\n arr = arr[58:88, 702:1215]\n\n card = int(imagePaths[i][imagePaths[i].find('/')+1])\n\n if (card == 0):\n arr = arr[0:30, 16:70]\n\n elif (card == 1):\n arr = arr[0:30, 77:131]\n\n elif (card == 2):\n arr = arr[0:30, 138:192]\n\n elif (card == 3):\n arr = arr[0:30, 199:253]\n\n elif (card == 4):\n arr = arr[0:30, 260:314]\n\n elif (card == 5):\n arr = arr[0:30, 321:375]\n\n elif (card == 6):\n arr = arr[0:30, 382:436]\n\n elif (card == 7):\n arr = arr[0:30, 443:497]\n\n\n img = arr\n img = cv2.resize(img, (28, 28))\n img = img_to_array(img)\n x_train[j] = img\n\n\n # Negative Label\n\n\n img = cv2.imread(imagePaths[i])\n arr = img_to_array(img)\n #cv2.imwrite(\"croppped.png\", arr[58:88, 702:1215])\n\n arr = arr[58:88, 702:1215]\n\n card = int(imagePaths[i][imagePaths[i].find('/')+1])\n nonPlayedCards = np.arange(8)\n nonPlayedCards = nonPlayedCards.tolist()\n nonPlayedCards.remove(card)\n\n cardNotPlayed = randint(0, 6)\n\n\n if (cardNotPlayed == 0):\n arr = arr[0:30, 16:70]\n\n elif (cardNotPlayed == 1):\n arr = arr[0:30, 77:131]\n\n elif (cardNotPlayed == 2):\n arr = arr[0:30, 138:192]\n\n elif (cardNotPlayed == 3):\n arr = arr[0:30, 199:253]\n\n elif 
(cardNotPlayed == 4):\n arr = arr[0:30, 260:314]\n\n elif (cardNotPlayed == 5):\n arr = arr[0:30, 321:375]\n\n elif (cardNotPlayed == 6):\n arr = arr[0:30, 382:436]\n\n elif (cardNotPlayed == 7):\n arr = arr[0:30, 443:497]\n\n\n img = arr\n img = cv2.resize(img, (28, 28))\n img = img_to_array(img)\n x_train[j+1] = img\n\n j += 2\n\n y_train = np.zeros(len(x_train))\n\n for i in range(len(y_train)):\n y_train[i] = (i+1)%2\n\n return x_train, y_train\n\ndef loadTestingImages2():\n\n img = cv2.imread(\"testCNN.png\")\n arr = img_to_array(img)\n cv2.imwrite(\"croppped.png\", arr[58:88, 702:1215])\n\n arr = arr[58:88, 702:1215]\n\n cv2.imwrite(\"testData2/output1.png\", arr[0:30, 16:70])\n\n cv2.imwrite(\"testData2/output2.png\", arr[0:30, 77:131])\n\n cv2.imwrite(\"testData2/output3.png\", arr[0:30, 138:192])\n\n cv2.imwrite(\"testData2/output4.png\", arr[0:30, 199:253])\n\n cv2.imwrite(\"testData2/output5.png\", arr[0:30, 260:314])\n\n cv2.imwrite(\"testData2/output6.png\", arr[0:30, 321:375])\n\n cv2.imwrite(\"testData2/output7.png\", arr[0:30, 382:436])\n\n cv2.imwrite(\"testData2/output8.png\", arr[0:30, 443:497])\n\ndef trainModel2():\n EPOCHS = 150\n INIT_LR = 1e-3\n BS = 8\n\n print(\"[INFO] Loading Images\")\n x_train, y_train = loadTrainingImages2()\n print(x_train.shape)\n print(y_train.shape)\n print(\"[INFO] Images have been loaded.\")\n\n x_train /= 255\n\n y_train = to_categorical(y_train, num_classes=2)\n\n aug = ImageDataGenerator(width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2)\n\n print(\"[INFO] compiling model...\")\n model = LeNet.build(width=28, height=28, depth=3, classes=2)\n opt = Adam(lr=INIT_LR, decay=INIT_LR/EPOCHS)\n model.compile(loss=\"binary_crossentropy\", optimizer=opt, metrics=[\"accuracy\"])\n\n\n print(\"[INFO] training network...\")\n H = model.fit_generator(aug.flow(x_train, y_train, batch_size=BS), \n validation_data=(x_train, y_train), steps_per_epoch=len(x_train) // BS,\n epochs=EPOCHS, verbose=1)\n\n print(\"[INFO] serializing network...\")\n model.save(\"testNet2.model\")\n\ndef modelPredicts2():\n\n loadTestingImages2()\n\n print(\"[INFO] loading network...\")\n model = load_model(\"testNet2.model\")\n\n for i in range(8):\n img = cv2.imread(\"testData2/output\" + str(i+1) + \".png\")\n orig = img.copy()\n\n img = cv2.resize(img, (28, 28))\n img = img.astype(\"float\")/255.0\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n\n output = model.predict(img)[0]\n label = output.argmax()\n msg = \"Not Placed\"\n\n if (label == 1):\n msg = \"Placed\"\n\n print(output)\n print(label)\n\n label = \"Card \" + str(i) + \" - {}: {:.2f}%\".format(msg, output[label] * 100)\n\n print(label)\n\n orig = cv2.resize(orig, (400, 400))\n cv2.putText(orig, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.imshow(\"Output\", orig)\n cv2.waitKey(0)\n\ndef liveModelPredicts2():\n\n print(\"[INFO] loading network...\")\n model = load_model(\"testNet2.model\")\n\n opponentHand = ['Card 1', 'Card 2', 'Card 3', 'Card 4', 'Card 5', 'Card 6', 'Card 7', 'Card 8']\n\n print(\"[INFO] Type anything and press enter to begin...\")\n input()\n\n startTime = time.time()\n\n while (True):\n\n if (time.time()-startTime > 1):\n\n im = ImageGrab.grab()\n im.save(\"testCNN.png\")\n loadTestingImages2()\n\n for i in range(8):\n img = cv2.imread(\"testData2/output\" + str(i+1) + \".png\")\n img = cv2.resize(img, (28, 28))\n img = img.astype(\"float\")/255.0\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n output = 
model.predict(img)[0]\n label = output.argmax()\n msg = \"Not Placed\"\n\n if (label == 1):\n msg = \"Placed\"\n opponentHand.remove(\"Card \" + str(i+1))\n opponentHand.append(\"Card \" + str(i+1))\n\n labelString = \"Card \" + str(i+1) + \" - {}: {:.2f}%\".format(msg, output[label] * 100)\n\n print(labelString)\n\n print(\"--------Opponent's Hand--------\")\n print(opponentHand)\n print()\n print()\n\n startTime = time.time()\n\n\n\ndef liveBothModelPredicts():\n\n imagePaths = sorted(list(paths.list_images(\"trainData/\")))\n imageNames = sorted(list(paths.list_images(\"trainData/\")))\n\n for i in range(len(imageNames)):\n imageNames[i] = imageNames[i][imageNames[i].find('/')+1:-4]\n\n cardCollection = loadCardCollection()\n\n print(\"[INFO] loading both networks...\")\n model1 = load_model(\"testNet.model\")\n model2 = load_model(\"testNet2.model\")\n\n opponentCards = ['MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard']\n tempOpponentCards = ['MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard']\n\n continuousClassificationCount = [0, 0, 0, 0, 0, 0, 0, 0]\n requiredContinuousClassificationCount = 3\n\n opponentHand = ['MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard', 'MysteryCard']\n\n # Cards that are placed before getting classified\n pending = []\n tempPending = []\n\n pendingElixir = 0\n\n root = tkinter.Tk()\n elixerFrame = tkinter.LabelFrame(root, text=\"Opponent's Elixir\", labelanchor=\"n\")\n elixerFrame.pack()\n\n myFrame = tkinter.LabelFrame(root, text=\"Opponent's Cards in Hand\", labelanchor=\"n\")\n myFrame.pack()\n\n myFrame2 = tkinter.LabelFrame(root, text=\"Opponent's Upcoming Cards\", labelanchor=\"n\")\n myFrame2.pack()\n\n #myFrame3 = tkinter.LabelFrame(root, text=\"Opponent's Deck\", labelanchor=\"n\")\n #myFrame3.pack()\n\n panel = tkinter.Label(elixerFrame, text='L')\n panel.grid(row=0, column=0)\n root.update()\n\n for i in range(4):\n img = Image.open(\"trainData/MysteryCard.png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img, borderwidth=10, bg='green')\n panel.image = img\n panel.grid(row=0, column=i)\n root.update()\n\n for i in range(4):\n img = Image.open(\"trainData/MysteryCard.png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame2, image = img, borderwidth=10, bg='orange')\n panel.image = img\n panel.grid(row=0, column=i)\n root.update()\n\n print(\"[INFO] Enter the starting elixir to begin...\")\n elixir = int(input())\n\n elixirRatio = 1/2.8\n\n startTime = time.time()\n trueStartTime = time.time()\n snapshotTime = 0.4\n\n while (True):\n\n elapsedTime = time.time()-startTime\n\n if (elapsedTime > 120):\n elixirRatio = 1/1.4\n\n if (elapsedTime > snapshotTime):\n\n startTime = time.time()\n\n elixir += elixirRatio * elapsedTime\n if (elixir > 10):\n elixir = 10\n\n panel = tkinter.Label(elixerFrame, text=format(elixir, '.1f'))\n panel.grid(row=0, column=0)\n root.update()\n\n im = ImageGrab.grab()\n im.save(\"testCNN.png\")\n loadTestingImages1()\n loadTestingImages2()\n\n for i in range(8):\n\n if (opponentCards[i] != \"MysteryCard\"):\n continue\n\n img = cv2.imread(\"testData/output\" + str(i+1) + \".png\")\n img = cv2.resize(img, (32, 32))\n img = img.astype(\"float\")/255.0\n img = img_to_array(img)\n img = 
np.expand_dims(img, axis=0)\n\n output = model1.predict(img)[0]\n label = output.argmax()\n\n if (imageNames[label] == \"MysteryCard\"):\n continue\n\n elif (tempOpponentCards[i] == imageNames[label]):\n if (continuousClassificationCount[i] == requiredContinuousClassificationCount):\n opponentCards[i] = imageNames[label]\n else:\n continuousClassificationCount[i] += 1\n\n #img = Image.open(imagePaths[label])\n #img.thumbnail((128, 128), Image.ANTIALIAS)\n #img = ImageTk.PhotoImage(img)\n #panel = tkinter.Label(myFrame3, image = img, borderwidth=10)\n #panel.image = img\n #panel.grid(row=0, column=i)\n #root.update()\n\n else:\n tempOpponentCards[i] = imageNames[label]\n continuousClassificationCount[i] = 0\n\n labelString = \"{}: {:.2f}%\".format(imageNames[label], output[label] * 100)\n\n print(labelString)\n\n # Move all pending cards to the back\n\n for i in range(len(pending)):\n\n if (opponentCards[pending[i]] == \"MysteryCard\"):\n tempPending.append(pending[i])\n continue\n\n else:\n opponentHand.pop(0)\n opponentHand.append(opponentCards[pending[i]])\n\n elixir -= cardCollection[opponentCards[pending[i]]]\n\n if (i == len(pending)-1 and len(tempPending) == 0):\n elixir += elixirRatio * (time.time() - pendingElixir)\n if (elixir > 10):\n elixir = 10\n pendingElixir = 0\n \n pending = tempPending\n tempPending = []\n\n for i in range(8):\n img = cv2.imread(\"testData2/output\" + str(i+1) + \".png\")\n img = cv2.resize(img, (28, 28))\n img = img.astype(\"float\")/255.0\n img = img_to_array(img)\n img = np.expand_dims(img, axis=0)\n\n output = model2.predict(img)[0]\n label = output.argmax()\n msg = \"Not Placed\"\n\n if (label == 1 or (label == 0 and output[label] < .80)):\n msg = \"Placed\"\n if (opponentCards[i] == \"MysteryCard\"):\n if (i not in pending):\n pending.append(i)\n if (pendingElixir == 0):\n pendingElixir = time.time()\n\n elif (opponentHand.index(opponentCards[i]) < 4):\n opponentHand.remove(opponentCards[i])\n opponentHand.append(opponentCards[i])\n elixir -= cardCollection[opponentCards[i]]\n\n labelString = \"Card \" + str(i+1) + \" - {}: {:.2f}%\".format(msg, output[label] * 100)\n\n print(labelString)\n\n for i in range(4):\n img = Image.open(\"trainData/\" + opponentHand[i] + \".png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img, borderwidth=10, bg='green')\n panel.image = img\n panel.grid(row=0, column=i)\n root.update()\n\n for i in range(4):\n img = Image.open(\"trainData/\" + opponentHand[i+4] + \".png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame2, image = img, borderwidth=10, bg='orange')\n panel.image = img\n panel.grid(row=0, column=i)\n root.update()\n \n print(\"--------Opponent's Deck--------\")\n print(opponentCards)\n print(\"--------Opponent's Hand--------\")\n print(opponentHand)\n print(\"--------Pending--------\")\n print(pending)\n print()\n print()\n\n\ndef createCardCollection():\n imageNames = sorted(list(paths.list_images(\"trainData/\")))\n\n for i in range(len(imageNames)):\n imageNames[i] = imageNames[i][imageNames[i].find('/')+1:-4]\n\n cardCollection = dict()\n\n for x in imageNames:\n print(x)\n cardCollection[x] = int(input())\n\n with open('cardCollection.txt', 'w') as f:\n for key, value in cardCollection.items():\n f.write('%s:%s\\n' % (key, value))\n\ndef loadCardCollection():\n data = dict()\n with open('cardCollection.txt') as raw_data:\n for item in raw_data:\n key,value = 
item.split(':', 1)\n data[key]=int(value[0:value.find('/')])\n\n return data\n\n# --- CNN 1 ---\n\n#trainModel1()\n#modelPredicts1()\n#liveModelPredicts1()\n\n\n# --- CNN 2 ---\n\n#generateTrainingImages2()\n#labelTrainingData2()\n#trainModel2()\n#modelPredicts2()\n#liveModelPredicts2()\n\nliveBothModelPredicts()\n#createCardCollection()\n#print(loadCardCollection())\n\ndef testingGUI():\n\n root = tkinter.Tk()\n\n myFrame = tkinter.LabelFrame(root, text=\"Opponent's Cards\", labelanchor=\"n\")\n myFrame.pack()\n\n for r in range(1):\n for c in range(4):\n img = Image.open(\"trainData/GoblinHutCard.png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img, borderwidth=10)\n panel.image = img\n panel.grid(row=r, column=c)\n root.update()\n\n st = time.time()\n\n while(True):\n if(time.time() - st > 1):\n img = Image.open(\"trainData/TheLogCard.png\")\n img.thumbnail((128, 128), Image.ANTIALIAS)\n img = ImageTk.PhotoImage(img)\n panel = tkinter.Label(myFrame, image = img, borderwidth=10)\n panel.image = img\n panel.grid(row=0, column=2)\n root.update()\n\n st = time.time()", "sub_path": "Clash Royale Helper/Clash Royale Helper/Clash_Royale_Helper.py", "file_name": "Clash_Royale_Helper.py", "file_ext": "py", "file_size_in_byte": 25061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "keras.models.Sequential", "line_number": 46, "usage_type": "call"}, {"api_name": "keras.backend.image_data_format", "line_number": 50, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 50, "usage_type": "name"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 54, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 55, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 56, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.Conv2D", "line_number": 59, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 60, "usage_type": "call"}, {"api_name": "keras.layers.convolutional.MaxPooling2D", "line_number": 61, "usage_type": "call"}, {"api_name": "keras.layers.core.Flatten", "line_number": 66, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 67, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 68, "usage_type": "call"}, {"api_name": "keras.layers.core.Dense", "line_number": 71, "usage_type": "call"}, {"api_name": "keras.layers.core.Activation", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 80, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 82, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 82, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 87, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 101, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 102, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 106, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 108, 
"usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 112, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 114, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 116, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 118, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 120, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 139, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.ImageDataGenerator", "line_number": 143, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 147, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 163, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 163, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 169, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 172, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 175, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 178, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 191, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 192, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 192, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 194, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 195, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 199, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 199, "usage_type": "name"}, {"api_name": "imutils.paths.list_images", "line_number": 200, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 200, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 206, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 211, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 212, "usage_type": "call"}, {"api_name": "time.time", "line_number": 218, "usage_type": "call"}, {"api_name": "time.time", "line_number": 222, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 224, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 224, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 233, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 234, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 236, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 237, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 248, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 248, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 249, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 249, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 250, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 250, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 251, "usage_type": "call"}, {"api_name": "time.time", "line_number": 268, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 274, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 274, "usage_type": "name"}, 
{"api_name": "time.time", "line_number": 279, "usage_type": "call"}, {"api_name": "time.time", "line_number": 285, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 288, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 288, "usage_type": "name"}, {"api_name": "time.time", "line_number": 292, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 296, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 296, "usage_type": "name"}, {"api_name": "imutils.paths.list_images", "line_number": 297, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 297, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 299, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 300, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 306, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 306, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 307, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 307, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 308, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 308, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 309, "usage_type": "call"}, {"api_name": "shutil.copyfile", "line_number": 317, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 320, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 324, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 324, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 325, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 333, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 334, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 367, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 368, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 375, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 376, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 382, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 386, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 415, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 416, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 421, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 430, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 431, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 432, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 436, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 438, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 440, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 442, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 444, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 446, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 448, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 450, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 465, "usage_type": "call"}, {"api_name": 
"keras.preprocessing.image.ImageDataGenerator", "line_number": 467, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 471, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 488, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 491, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 494, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 497, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 514, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 515, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 515, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 517, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 518, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 523, "usage_type": "call"}, {"api_name": "time.time", "line_number": 530, "usage_type": "call"}, {"api_name": "time.time", "line_number": 534, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 536, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 536, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 541, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 542, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 544, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 545, "usage_type": "call"}, {"api_name": "time.time", "line_number": 565, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 571, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 571, "usage_type": "name"}, {"api_name": "imutils.paths.list_images", "line_number": 572, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 572, "usage_type": "name"}, {"api_name": "keras.models.load_model", "line_number": 580, "usage_type": "call"}, {"api_name": "keras.models.load_model", "line_number": 581, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 597, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 598, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 601, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 604, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 610, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 615, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 615, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 616, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 616, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 617, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 617, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 618, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 624, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 624, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 625, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 625, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 626, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 626, "usage_type": "name"}, {"api_name": 
"tkinter.Label", "line_number": 627, "usage_type": "call"}, {"api_name": "time.time", "line_number": 637, "usage_type": "call"}, {"api_name": "time.time", "line_number": 638, "usage_type": "call"}, {"api_name": "time.time", "line_number": 643, "usage_type": "call"}, {"api_name": "time.time", "line_number": 650, "usage_type": "call"}, {"api_name": "tkinter.Label", "line_number": 656, "usage_type": "call"}, {"api_name": "PIL.ImageGrab.grab", "line_number": 660, "usage_type": "call"}, {"api_name": "PIL.ImageGrab", "line_number": 660, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 670, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 671, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 673, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 674, "usage_type": "call"}, {"api_name": "time.time", "line_number": 719, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 728, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 729, "usage_type": "call"}, {"api_name": "keras.preprocessing.image.img_to_array", "line_number": 731, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 732, "usage_type": "call"}, {"api_name": "time.time", "line_number": 744, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 756, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 756, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 757, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 757, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 758, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 758, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 759, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 765, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 765, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 766, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 766, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 767, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 767, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 768, "usage_type": "call"}, {"api_name": "imutils.paths.list_images", "line_number": 784, "usage_type": "call"}, {"api_name": "imutils.paths", "line_number": 784, "usage_type": "name"}, {"api_name": "tkinter.Tk", "line_number": 829, "usage_type": "call"}, {"api_name": "tkinter.LabelFrame", "line_number": 831, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 836, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 836, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 837, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 837, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 838, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 838, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 839, "usage_type": "call"}, {"api_name": "time.time", "line_number": 844, "usage_type": "call"}, {"api_name": "time.time", "line_number": 847, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 848, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 848, "usage_type": "name"}, {"api_name": "PIL.Image.ANTIALIAS", "line_number": 849, "usage_type": 
"attribute"}, {"api_name": "PIL.Image", "line_number": 849, "usage_type": "name"}, {"api_name": "PIL.ImageTk.PhotoImage", "line_number": 850, "usage_type": "call"}, {"api_name": "PIL.ImageTk", "line_number": 850, "usage_type": "name"}, {"api_name": "tkinter.Label", "line_number": 851, "usage_type": "call"}, {"api_name": "time.time", "line_number": 856, "usage_type": "call"}]} +{"seq_id": "289733815", "text": "import numpy as np\r\nimport pandas as pd\r\nimport math\r\nimport os\r\nimport scipy.io as scio\r\n\r\nnp.random.seed(0)\r\nfileDir = os.getcwd()\r\nACTIONS = ['H-BPSK', 'L-BPSK', 'H-QPSK', 'L-QPSK', 'H-8PSK', 'L-8PSK'] # 三种调制方式\r\nstateData = np.array([3, 5, 7, 11, 13, 15, 17, 23, 25, 27])\r\nENERGY = 400\r\nBERLimit = 0.001\r\nN_STATES = ENERGY # 状态数量\r\nEPSILON = 0.9 # 贪婪度,90%几率选择最优动作\r\nALPHA = 0.1 # 学习率\r\nGAMMA = 0.9 # 之前奖励的衰减值\r\nMAX_EPISODES = 5000 # 回合数\r\n\r\n\r\ndef build_q_table(n_states, actions):\r\n table = pd.DataFrame( # 使用pandas创建一个表格\r\n np.zeros((n_states, len(actions))), # 全0初始化Q表,行为状态个数,列为动作个数\r\n columns=actions, # index 行索引 columns 列索引\r\n )\r\n # print(table) # show table\r\n return table\r\n\r\n\r\n# def choose_action(state, q_table, counter): # 根据state选择动作actions\r\n# # This is how to choose an action\r\n# state_actions = q_table.iloc[state, :] # 按索引号从q表中取所有动作,state:目前位置\r\n# if counter > 3800:\r\n# epsl = 1\r\n# else:\r\n# epsl = EPSILON\r\n#\r\n# if (np.random.uniform() > epsl) or ((state_actions == 0).all()): # 随机数大于0.9即10%几率或state对应值全为0使用随机动作\r\n# action_name = np.random.choice(ACTIONS) # 随机选择动作\r\n# else: # 90%几率\r\n# action_name = state_actions.idxmax() # 使用Q表中state对应值最大的动作\r\n# return action_name\r\n\r\n\r\ndef choose_action(state, q_table): # 根据state选择动作actions\r\n # This is how to choose an action\r\n state_actions = q_table.iloc[state, :] # 按索引号从q表中取所有动作,state:目前位置\r\n if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()): # 随机数大于0.9即10%几率或state对应值全为0使用随机动作\r\n action_name = np.random.choice(ACTIONS) # 随机选择动作\r\n else: # 90%几率\r\n action_name = state_actions.idxmax() # 使用Q表中state对应值最大的动作\r\n return action_name\r\n\r\n\r\ndef choose_snr(stepCounter):\r\n count = stepCounter % 20\r\n rand = np.random.uniform()\r\n if count < 10:\r\n snrNum = stateData[count]\r\n else:\r\n count = 19 - count\r\n snrNum = stateData[count]\r\n if rand < 0.2:\r\n rand_snr = np.random.randint(-1, 2)\r\n elif rand < 0.5:\r\n rand_snr = np.random.randint(-2, 3)\r\n elif rand < 0.8:\r\n rand_snr = np.random.randint(-3, 4)\r\n else:\r\n rand_snr = 0\r\n snr = snrNum + rand_snr\r\n return snr\r\n\r\n\r\ndef get_env_feedback(S, A, energy, Data): # 动作对状态环境的影响\r\n snr = choose_snr(S)\r\n if A == 'H-BPSK':\r\n energy_snr = snr + 2\r\n BER = 0.5 * math.erfc(math.sqrt(energy_snr))\r\n energy = energy - 2\r\n if BER <= BERLimit:\r\n R = (1-BER) * 2 * math.log(2, 2)\r\n Data += 2\r\n else:\r\n R = -10\r\n elif A == 'L-BPSK':\r\n energy_snr = snr + 1\r\n BER = 0.5 * math.erfc(math.sqrt(energy_snr))\r\n energy -= 1\r\n if BER <= BERLimit:\r\n R = (1-BER) * 1 * math.log(2, 2)\r\n Data += 1\r\n else:\r\n R = -10\r\n elif A == 'H-QPSK':\r\n energy_snr = snr + 2\r\n BER = math.erfc(math.sqrt(4*energy_snr)*math.sin(math.pi/8))\r\n energy = energy - 2\r\n if BER <= BERLimit:\r\n R = (1-BER) * 4 * math.log(4, 2)\r\n Data += 4\r\n else:\r\n R = -10\r\n elif A == 'L-QPSK':\r\n energy_snr = snr + 1\r\n BER = math.erfc(math.sqrt(4*energy_snr)*math.sin(math.pi/8))\r\n energy -= 1\r\n if BER <= BERLimit:\r\n R = (1-BER) * 3 * math.log(4, 2)\r\n Data += 3\r\n else:\r\n R = -10\r\n elif 
A == 'H-8PSK':\r\n energy_snr = snr + 2\r\n BER = math.erfc(math.sqrt(6*energy_snr)*math.sin(math.pi/16))\r\n energy = energy - 2\r\n if BER <= BERLimit:\r\n R = (1-BER) * 6 * math.log(8, 2)\r\n Data += 6\r\n else:\r\n R = -10\r\n else:\r\n energy_snr = snr + 1\r\n BER = math.erfc(math.sqrt(6*energy_snr)*math.sin(math.pi/16))\r\n energy -= 1\r\n if BER <= BERLimit:\r\n R = (1-BER) * 5 * math.log(8, 2)\r\n Data += 5\r\n else:\r\n R = -10\r\n S_ = ENERGY - energy # 更新状态\r\n return S_, R, energy, Data\r\n\r\n\r\ndef rl():\r\n # main part of RL loop\r\n qTable = build_q_table(N_STATES, ACTIONS) # 创建Q表\r\n qTable_temp = qTable.copy()\r\n result = np.zeros((MAX_EPISODES, 2)) # 该回合传输用的次数\r\n for episode in range(MAX_EPISODES): # MAX_EPISODES个回合的循环\r\n energy = ENERGY\r\n step_counter = 0\r\n S = 0 # 初始位置\r\n data = 0\r\n is_terminated = False\r\n while not is_terminated:\r\n A = choose_action(S, qTable) # 选择动作\r\n S_, R, energy, data = get_env_feedback(S, A, energy, data) # 获得之后的状态及奖励\r\n if energy <= 0: # 能量用光或数据传完则结束这一回合\r\n S_ = 'terminal' # 添加传输完成标志\r\n q_predict = qTable.loc[S, A] # 估计值\r\n if S_ != 'terminal':\r\n q_target = R + GAMMA * qTable.iloc[S_, :].max() # q-leaning 真实值\r\n else:\r\n is_terminated = True # 传输结束\r\n q_target = R\r\n cishu = \"次数:\" + str(step_counter) + \" 回合:\" + str(episode) + \" 传输数据量:\" + str(data)\r\n result[episode, :] = (episode+1, data) # 该episode回合的传输数据量\r\n print('\\r{}'.format(cishu), end='')\r\n qTable.loc[S, A] += ALPHA * (q_target - q_predict) # 更新Q表\r\n S = S_ # 更新状态\r\n step_counter += 1\r\n if episode == (MAX_EPISODES-2):\r\n qTable_temp = qTable.copy()\r\n error_calculate(qTable_temp, qTable)\r\n return qTable, result\r\n\r\n\r\ndef error_calculate(table_first, table_last):\r\n table_first_np = table_first.values\r\n table_last_np = table_last.values\r\n print(\" MAE:\" + str(np.mean(np.abs(table_last_np - table_first_np))))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n Q_TABLE, Result = rl()\r\n dataNew = fileDir + '\\\\Q_Table.csv'\r\n resultNew = fileDir + '\\\\result.mat'\r\n scio.savemat(resultNew, {'Result': Result})\r\n Q_TABLE.to_csv(dataNew)\r\n", "sub_path": "QLearning.py", "file_name": "QLearning.py", "file_ext": "py", "file_size_in_byte": 6538, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 8, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 10, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.random.uniform", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 47, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 48, "usage_type": "attribute"}, {"api_name": "numpy.random.uniform", "line_number": 56, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 56, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 63, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, 
{"api_name": "numpy.random.randint", "line_number": 67, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 67, "usage_type": "attribute"}, {"api_name": "math.erfc", "line_number": 78, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 78, "usage_type": "call"}, {"api_name": "math.log", "line_number": 81, "usage_type": "call"}, {"api_name": "math.erfc", "line_number": 87, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 87, "usage_type": "call"}, {"api_name": "math.log", "line_number": 90, "usage_type": "call"}, {"api_name": "math.erfc", "line_number": 96, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 96, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 96, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 96, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 99, "usage_type": "call"}, {"api_name": "math.erfc", "line_number": 105, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 105, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 105, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 105, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 108, "usage_type": "call"}, {"api_name": "math.erfc", "line_number": 114, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 114, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 114, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 114, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 117, "usage_type": "call"}, {"api_name": "math.erfc", "line_number": 123, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 123, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 123, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 123, "usage_type": "attribute"}, {"api_name": "math.log", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 171, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 178, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 178, "usage_type": "name"}]} +{"seq_id": "204481973", "text": "# -*- coding: utf-8 -*-\n\"\"\"Module providing standalone content panel edit forms\"\"\"\nfrom plone.app.textfield import RichText\nfrom plone.autoform.interfaces import IFormFieldProvider\nfrom plone.namedfile import field as named_file\nfrom zope import schema\nfrom zope.interface import Interface, provider\n\nfrom ade25.panelpage import MessageFactory as _\n\n\n@provider(IFormFieldProvider)\nclass IHPHWidgetImagePoster(Interface):\n \"\"\" Content Panel Storage Slots \"\"\"\n\n image = named_file.NamedBlobImage(\n title=_(u\"Poster Image\"),\n required=True\n )\n image_caption = schema.TextLine(\n title=_(u\"Poster Image Copyright Information\"),\n required=False\n )\n text = RichText(\n title=_(u\"Text\"),\n required=False,\n allowed_mime_types=('text/html', ),\n )\n", "sub_path": "src/hph.widgets/hph/widgets/widgets/image/interfaces.py", "file_name": "interfaces.py", "file_ext": "py", "file_size_in_byte": 806, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "zope.interface.Interface", "line_number": 13, "usage_type": "name"}, {"api_name": "plone.namedfile.field.NamedBlobImage", "line_number": 16, "usage_type": "call"}, 
{"api_name": "plone.namedfile.field", "line_number": 16, "usage_type": "name"}, {"api_name": "ade25.panelpage.MessageFactory", "line_number": 17, "usage_type": "call"}, {"api_name": "zope.schema.TextLine", "line_number": 20, "usage_type": "call"}, {"api_name": "zope.schema", "line_number": 20, "usage_type": "name"}, {"api_name": "ade25.panelpage.MessageFactory", "line_number": 21, "usage_type": "call"}, {"api_name": "plone.app.textfield.RichText", "line_number": 24, "usage_type": "call"}, {"api_name": "ade25.panelpage.MessageFactory", "line_number": 25, "usage_type": "call"}, {"api_name": "zope.interface.provider", "line_number": 12, "usage_type": "call"}, {"api_name": "plone.autoform.interfaces.IFormFieldProvider", "line_number": 12, "usage_type": "argument"}]} +{"seq_id": "532039347", "text": "from base.base_page import BasePage\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nimport time\n\n\nclass FamilyText(BasePage):\n\n def __init__(self, driver):\n super().__init__(driver)\n self.driver = driver\n\n\n #locators\n _family_radio_btn = \"//*[@id='send_to_family_member']\" #click\n _select_family_member_dd = \"//select[@id='family_members']\" #click\n _select_family_option = \"//option[@value='1: Object']\" #click\n _text_radio_btn = \"//input[@id='channel_sms']\" #click\n _button_send_close = \"//button[@test-id='sendandclose']\" #click\n _green_validation = \"//div[@role='alertdialog']\" #validate the form has been sent by modento\n #api call to confirm the form was delivered\n\n def family_initial_setup(self):\n self.driver.find_element(By.XPATH, self._family_radio_btn).click()\n time.sleep(1)\n self.driver.find_element(By.XPATH, self._select_family_member_dd).click()\n self.driver.find_element(By.XPATH, self._select_family_option).click()\n self.driver.find_element(By.XPATH, self._text_radio_btn).click()\n\n def family_send_and_close_button(self):\n self.driver.find_element(By.XPATH, self._button_send_close).click()\n\n def verify_form_sent_to_family(self):\n forms_sent_validation = self.driver.find_element(By.XPATH, self._green_validation)\n assert forms_sent_validation.is_displayed()\n", "sub_path": "pages/family_text.py", "file_name": "family_text.py", "file_ext": "py", "file_size_in_byte": 1447, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "base.base_page.BasePage", "line_number": 7, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 24, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 24, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 26, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 26, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 27, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 28, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 31, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 31, "usage_type": "name"}, {"api_name": 
"selenium.webdriver.common.by.By.XPATH", "line_number": 34, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 34, "usage_type": "name"}]} +{"seq_id": "179687170", "text": "import json\n\n\nclass Item:\n\n def __init__(self, nom, prix, cat): # Notre méthode constructeur\n self.nom = nom\n self.prix = prix\n self.stock = None\n self.isformule = False\n self.categorie = cat\n\ntab = []\ntab.append(Item(\"Poulet basquaise\", 15, \"plat\"))\ntab.append(Item(\"Steak haché\", 7, \"plat\"))\ntab.append(Item(\"Mousse au caca\", 2.5, \"dessert\"))\n\nwith open('data.json', 'w') as outfile:\n json.dump([ob.__dict__ for ob in tab], outfile)", "sub_path": "item.py", "file_name": "item.py", "file_ext": "py", "file_size_in_byte": 477, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.dump", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "213424671", "text": "# -*- coding: utf-8 -*-\n\nfrom nbsite.shared_conf import *\n\nproject = u'Param'\nauthors = u'HoloViz authors'\ncopyright = u'\\u00a9 2005-2018, ' + authors\ndescription = 'Declarative Python programming using Parameters.'\n\nimport param\nversion = release = param.__version__\n\nhtml_static_path += ['_static']\nhtml_theme = 'sphinx_holoviz_theme'\nhtml_theme_options = {\n 'logo':'logo.png',\n 'favicon':'favicon.ico',\n# 'css':'site.css'\n}\n\n_NAV = (\n ('API', 'Reference_Manual/param'),\n ('About', 'About'),\n)\n\nhtml_context.update({\n 'PROJECT': project,\n 'DESCRIPTION': description,\n 'AUTHOR': authors,\n # canonical URL (for search engines); can ignore for local builds\n 'WEBSITE_SERVER': 'https://param.holoviz.org',\n 'VERSION': version,\n 'GOOGLE_ANALYTICS_UA': 'UA-154795830-6',\n 'NAV': _NAV,\n 'LINKS': _NAV,\n 'SOCIAL': (\n ('Gitter', '//gitter.im/pyviz/pyviz'),\n ('Github', '//github.com/ioam/param'),\n )\n})\n", "sub_path": "doc/conf.py", "file_name": "conf.py", "file_ext": "py", "file_size_in_byte": 959, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "param.__version__", "line_number": 11, "usage_type": "attribute"}]} +{"seq_id": "399632259", "text": "#!/usr/bin/env python\n\n\"\"\"Competition_Code, 9/15/16, Sajad Azami, Taher Ahmadi\"\"\"\n__author__ = 'sajjadaazami@gmail.com (Sajad Azami), 14taher@gmail.com (Taher Ahmadi)'\n\nimport random\nimport sys\nimport threading\nimport time\n\nimport actionlib\nimport rospy\nimport smach\nimport smach_ros\nimport tf\nfrom actionlib_msgs.msg import GoalStatusArray\nfrom behaviour_smach.msg import *\nfrom move_base_msgs.msg import *\nfrom nav_msgs.msg import OccupancyGrid\n\n# Variables Declaration\ngoals_list = [] # goals given to robot will be appended to this\ncurrent_goal_status = 0 # goal status\nstatus_dict = {'PENDING': 0, 'ACTIVE': 1, 'PREEMPTED': 2, 'SUCCEEDED': 3, 'ABORTED': 4, 'REJECTED': 5, 'PREEMPTING': 6,\n 'RECALLING': 7, 'RECALLED': 8, 'LOST': 9}\nglobal_costmap = 0 # 2d array of costmap\nrobot_namespace = ''\ncurrent_direction = 0 # current direction of robot explore(0-4)\n\n\n# Functions Declaration\n\n# get current position of robot using tf translation\ndef get_current_position():\n listener = tf.TransformListener()\n rate = rospy.Rate(10.0)\n flag = True\n trans = 0\n while flag and not rospy.is_shutdown():\n try:\n (trans, rot) = listener.lookupTransform((robot_namespace + '/map'), (robot_namespace + '/base_link'),\n rospy.Time(0))\n rospy.loginfo(trans)\n flag = False\n 
except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):\n continue\n return trans\n\n\n# subscriber method callback from /direction_status\ndef callback_direction_status(data):\n global current_direction\n current_direction = data.direction\n return current_direction\n\n\n# subscriber method from /direction_status\ndef listener_direction_status():\n rospy.Subscriber((robot_namespace + \"/direction_status\"), DirectionStatus, callback_direction_status)\n return\n\n\n# get current direction of robot via the direction status subscriber\ndef get_current_direction():\n listener_direction_status()\n\n\n# random goal generator (use NE, NW, SE and SW for directions)\ndef get_random_goal(exp_type):\n if exp_type == 'NW': # NW\n x = random.uniform(-50.0, 0)\n y = random.uniform(0, 50.0)\n w = 1\n z = 1\n elif exp_type == 'NE': # NE\n x = random.uniform(0.0, 50.0)\n y = random.uniform(0.0, 50.0)\n w = 1\n z = 1\n elif exp_type == 'SW': # SW\n x = random.uniform(-50.0, 0)\n y = random.uniform(-50.0, 0)\n w = 1\n z = 1\n elif exp_type == 'SE': # SE\n x = random.uniform(0, 50.0)\n y = random.uniform(-50.0, 0)\n w = 1\n z = 1\n else:\n x = random.uniform(-50.0, 50.0)\n y = random.uniform(-50.0, 50.0)\n w = 1\n z = 1\n return [x, y, 0, w, 0, 0, z]\n\n\n# subscriber method callback from /move_base/status\ndef callback_goal_status(data):\n global current_goal_status\n current_goal_status = data.status_list[len(data.status_list) - 1].status\n\n\n# subscriber method from /move_base/status\ndef get_current_goal_status():\n rospy.Subscriber((robot_namespace + \"/move_base/status\"), GoalStatusArray, callback_goal_status)\n return current_goal_status\n\n\n# subscriber method callback from /move_base/global_costmap/costmap\ndef callback_global_costmap(data):\n global global_costmap\n global_costmap = data.data\n\n\n# subscriber method from /move_base/global_costmap/costmap\ndef listener_global_costmap():\n rospy.Subscriber((robot_namespace + \"/move_base/global_costmap/costmap\"), OccupancyGrid, callback_global_costmap)\n\n\n# publishes goal on move_base/goal using SimpleActionClient\n# inputs: position x, y, z, orientation w, x, y, z\ndef move_to(pos_x, pos_y, pos_z, ornt_w, ornt_x, ornt_y, ornt_z):\n # Simple Action Client\n sac = actionlib.SimpleActionClient((robot_namespace + '/move_base'), MoveBaseAction)\n\n # create goal\n goal = MoveBaseGoal()\n\n # set goal\n goal.target_pose.pose.position.x = pos_x\n goal.target_pose.pose.position.y = pos_y\n goal.target_pose.pose.orientation.w = ornt_w\n goal.target_pose.pose.orientation.z = ornt_z\n goal.target_pose.header.frame_id = (robot_namespace + '/odom')\n goal.target_pose.header.stamp = rospy.Time.now()\n\n # start listener\n sac.wait_for_server()\n\n # send goal\n sac.send_goal(goal)\n\n # finish\n # sac.wait_for_result()\n\n\n# States Declaration\n\n# define Detect state\nclass Detect(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['goalReached', 'goalCancelled'])\n # self.mutex = threading.Lock()\n # self.found_received = False\n\n # def callback(self, msg):\n # self.mutex.acquire()\n # if msg.found == 1:\n # self.found_received = True\n # self.mutex.release()\n #\n def execute(self, userdata):\n rospy.loginfo('Executing state Detect')\n while get_current_goal_status() == 1:\n rospy.loginfo('Detecting')\n rospy.loginfo(get_current_goal_status())\n time.sleep(1)\n return 'goalReached'\n\n\n# define Explore state\nclass Explore(smach.State):\n def __init__(self):\n smach.State.__init__(self, outcomes=['goalPublished', 
'goalNotPublished'])\n # rospy.Subscriber(\"/move_base/global_costmap/costmap\", OccupancyGrid, callback_global_costmap())\n\n def execute(self, userdata):\n rospy.loginfo('Executing state Explore')\n # TODO add to Documentation : Goal format is goal_list_temp = [x, y, 0, w, 0, 0, z]\n # current_position = get_current_position() # current translation of robot in an array[][]\n\n goal_temp = get_random_goal(5) # get random goal\n goals_list.append(goal_temp) # add goal to goal list (for further use)\n # move_to(goal_temp[0] + current_position[0], goal_temp[1] + current_position[1], goal_temp[2],\n # goal_temp[3], goal_temp[4], goal_temp[5], goal_temp[6], )\n move_to(goal_temp[0], goal_temp[1], goal_temp[2],\n goal_temp[3], goal_temp[4], goal_temp[5], goal_temp[6], )\n if get_current_goal_status() == 1:\n return 'goalPublished'\n else:\n return 'goalNotPublished'\n\n\n# Main Function\n\ndef main():\n rospy.init_node('behaviour')\n sm = smach.StateMachine(\n outcomes=['SHUTDOWN'])\n global robot_namespace\n if len(sys.argv) > 1:\n robot_namespace = sys.argv[1]\n with sm:\n smach.StateMachine.add('EXPLORE', Explore(),\n transitions={'goalPublished': 'DETECT', 'goalNotPublished': 'EXPLORE'})\n\n smach.StateMachine.add('DETECT', Detect(),\n transitions={'goalCancelled': 'EXPLORE', 'goalReached': 'EXPLORE'})\n\n sis = smach_ros.IntrospectionServer('Behavior', sm, (robot_namespace + '/SM_ROOT'))\n sis.start()\n\n # Execute SMACH plan\n outcome = sm.execute()\n\n rospy.spin()\n sis.stop()\n\n\nif __name__ == '__main__':\n main()\n", "sub_path": "Competition_Code/AI.py", "file_name": "AI.py", "file_ext": "py", "file_size_in_byte": 6971, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tf.TransformListener", "line_number": 35, "usage_type": "call"}, {"api_name": "rospy.Rate", "line_number": 36, "usage_type": "call"}, {"api_name": "rospy.is_shutdown", "line_number": 39, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 42, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 43, "usage_type": "call"}, {"api_name": "tf.LookupException", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tf.ConnectivityException", "line_number": 45, "usage_type": "attribute"}, {"api_name": "tf.ExtrapolationException", "line_number": 45, "usage_type": "attribute"}, {"api_name": "rospy.Subscriber", "line_number": 59, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 71, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 72, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 76, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 77, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 81, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 82, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 86, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 87, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 91, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 92, "usage_type": "call"}, {"api_name": "rospy.Subscriber", "line_number": 106, "usage_type": "call"}, {"api_name": "actionlib_msgs.msg.GoalStatusArray", "line_number": 106, "usage_type": "argument"}, {"api_name": "rospy.Subscriber", "line_number": 118, "usage_type": "call"}, {"api_name": "nav_msgs.msg.OccupancyGrid", "line_number": 118, "usage_type": "argument"}, {"api_name": 
"actionlib.SimpleActionClient", "line_number": 125, "usage_type": "call"}, {"api_name": "rospy.Time.now", "line_number": 136, "usage_type": "call"}, {"api_name": "rospy.Time", "line_number": 136, "usage_type": "attribute"}, {"api_name": "smach.State", "line_number": 151, "usage_type": "attribute"}, {"api_name": "smach.State.__init__", "line_number": 153, "usage_type": "call"}, {"api_name": "smach.State", "line_number": 153, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 164, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 166, "usage_type": "call"}, {"api_name": "rospy.loginfo", "line_number": 167, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 168, "usage_type": "call"}, {"api_name": "smach.State", "line_number": 173, "usage_type": "attribute"}, {"api_name": "smach.State.__init__", "line_number": 175, "usage_type": "call"}, {"api_name": "smach.State", "line_number": 175, "usage_type": "attribute"}, {"api_name": "rospy.loginfo", "line_number": 179, "usage_type": "call"}, {"api_name": "rospy.init_node", "line_number": 198, "usage_type": "call"}, {"api_name": "smach.StateMachine", "line_number": 199, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 202, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 203, "usage_type": "attribute"}, {"api_name": "smach.StateMachine.add", "line_number": 205, "usage_type": "call"}, {"api_name": "smach.StateMachine", "line_number": 205, "usage_type": "attribute"}, {"api_name": "smach.StateMachine.add", "line_number": 208, "usage_type": "call"}, {"api_name": "smach.StateMachine", "line_number": 208, "usage_type": "attribute"}, {"api_name": "smach_ros.IntrospectionServer", "line_number": 211, "usage_type": "call"}, {"api_name": "rospy.spin", "line_number": 217, "usage_type": "call"}]} +{"seq_id": "517367488", "text": "#\n# Copyright 2016 The BigDL Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport math\nimport os\n\nimport pytest\nfrom unittest import TestCase\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom bigdl.orca.data import SparkXShards\nfrom bigdl.orca.data.tf.data import Dataset\nfrom bigdl.orca.learn.tf2 import Estimator\nfrom bigdl.orca import init_orca_context, stop_orca_context, OrcaContext\n\n\nclass SimpleModel(tf.keras.Model):\n def __init__(self):\n super().__init__()\n self.simple_model = tf.keras.Sequential([\n tf.keras.layers.Dense(10),\n tf.keras.layers.Dense(1)])\n\n def call(self, inputs):\n return self.simple_model(inputs[\"item\"])\n\n def train_step(self, data):\n y = data[\"label\"]\n\n with tf.GradientTape() as tape:\n y_pred = self(data, training=True)\n loss = self.compiled_loss(y, y_pred, regularization_losses=self.losses)\n\n trainable_vars = self.trainable_variables\n gradients = tape.gradient(loss, trainable_vars)\n self.optimizer.apply_gradients(zip(gradients, trainable_vars))\n self.compiled_metrics.update_state(y, y_pred)\n return {m.name: m.result() for m in self.metrics}\n\n def test_step(self, data):\n y 
= data[\"label\"]\n\n y_pred = self(data, training=False)\n self.compiled_loss(y, y_pred, regularization_losses=self.losses)\n\n self.compiled_metrics.update_state(y, y_pred)\n return {m.name: m.result() for m in self.metrics}\n\n\ndef compile_args(config):\n if config is None:\n lr = 1e-3\n else:\n lr = config[\"lr\"]\n args = {\n \"optimizer\": tf.keras.optimizers.SGD(lr),\n \"loss\": \"mean_squared_error\",\n \"metrics\": [\"mean_squared_error\"]}\n return args\n\n\ndef simple_model(config):\n import tensorflow as tf\n model = tf.keras.models.Sequential([tf.keras.layers.Dense(10, input_shape=(1,)),\n tf.keras.layers.Dense(1)])\n return model\n\n\ndef model_creator(config):\n model = simple_model(config)\n model.compile(**compile_args(config))\n return model\n\n\ndef model_creator_for_orca_dataset(config):\n model = SimpleModel()\n model.compile(**compile_args(config))\n return model\n\n\ndef get_estimator(workers_per_node=2, model_fn=model_creator):\n estimator = Estimator.from_keras(model_creator=model_fn,\n config={\"lr\": 1e-3},\n workers_per_node=workers_per_node,\n backend=\"ray\")\n return estimator\n\n\nclass TestTF2EstimatorTF2Backend(TestCase):\n def setUp(self):\n self.sc = init_orca_context(init_ray_on_spark=True)\n\n def tearDown(self):\n stop_orca_context()\n\n def test_orca_tf_dataset(self):\n rdd = self.sc.parallelize(range(5))\n shard = SparkXShards(rdd)\n\n def generate_dataset(df, a, b, size):\n items = np.array([[i / size] for i in range(size)], dtype=np.float32)\n labels = np.array([a * x[0] + b for x in items], dtype=np.float32)\n return {\"item\": items, \"label\": labels}\n\n train_shard = shard.transform_shard(generate_dataset, 5, 10, 1000)\n test_shard = shard.transform_shard(generate_dataset, 5, 10, 100)\n\n train_dataset = Dataset.from_tensor_slices(train_shard)\n test_dataset = Dataset.from_tensor_slices(test_shard)\n train_step = math.ceil(5 * 1000 / 32)\n test_step = math.ceil(5 * 100 / 32)\n\n orca_estimator = get_estimator(workers_per_node=2, model_fn=model_creator_for_orca_dataset)\n\n start_eval_stats = orca_estimator.evaluate(data=test_dataset,\n num_steps=test_step, batch_size=32)\n\n train_stats = orca_estimator.fit(data=train_dataset,\n epochs=2, batch_size=32, steps_per_epoch=train_step)\n print(train_stats)\n\n end_eval_stats = orca_estimator.evaluate(data=test_dataset,\n num_steps=test_step, batch_size=32)\n\n assert orca_estimator.get_model(sample_input={\"item\": np.array([[1]], dtype=np.float32)})\n\n dloss = end_eval_stats[0][\"validation_loss\"] - start_eval_stats[0][\"validation_loss\"]\n dmse = (end_eval_stats[0][\"validation_mean_squared_error\"] -\n start_eval_stats[0][\"validation_mean_squared_error\"])\n print(f\"dLoss: {dloss}, dMSE: {dmse}\")\n assert dloss < 0 and dmse < 0, \"training sanity check failed. 
loss increased!\"\n\n pred_shards = orca_estimator.predict(test_dataset)\n pred1 = pred_shards.collect()\n\n path = \"/tmp/model_test_orca_dataset.ckpt\"\n try:\n orca_estimator.save_checkpoint(path)\n orca_estimator.shutdown()\n est = get_estimator(model_fn=model_creator_for_orca_dataset)\n with self.assertRaises(Exception) as context:\n est.load_checkpoint(path)\n self.assertTrue('Failed to set model weights, please provide real tensor data'\n in str(context.exception))\n est.load_checkpoint(path, sample_input={\"item\": np.array([[1]], dtype=np.float32)})\n result_shards = est.predict(test_dataset)\n pred2 = result_shards.collect()\n finally:\n os.remove(path)\n\n assert np.allclose(pred1[0][\"prediction\"], pred2[0][\"prediction\"])\n\n\nif __name__ == \"__main__\":\n pytest.main([__file__])", "sub_path": "python/orca/test/bigdl/orca/learn/ray/tf/test_tf2estimator_ray_backend.py", "file_name": "test_tf2estimator_ray_backend.py", "file_ext": "py", "file_size_in_byte": 5998, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tensorflow.keras", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.Sequential", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 36, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 36, "usage_type": "attribute"}, {"api_name": "tensorflow.GradientTape", "line_number": 44, "usage_type": "call"}, {"api_name": "tensorflow.keras.optimizers.SGD", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 70, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.models.Sequential", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 78, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 78, "usage_type": "call"}, {"api_name": "tensorflow.keras.layers.Dense", "line_number": 79, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 79, "usage_type": "attribute"}, {"api_name": "bigdl.orca.learn.tf2.Estimator.from_keras", "line_number": 96, "usage_type": "call"}, {"api_name": "bigdl.orca.learn.tf2.Estimator", "line_number": 96, "usage_type": "name"}, {"api_name": "unittest.TestCase", "line_number": 103, "usage_type": "name"}, {"api_name": "bigdl.orca.init_orca_context", "line_number": 105, "usage_type": "call"}, {"api_name": "bigdl.orca.stop_orca_context", "line_number": 108, "usage_type": "call"}, {"api_name": "bigdl.orca.data.SparkXShards", "line_number": 112, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 115, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 116, "usage_type": "attribute"}, {"api_name": "bigdl.orca.data.tf.data.Dataset.from_tensor_slices", "line_number": 122, "usage_type": "call"}, {"api_name": "bigdl.orca.data.tf.data.Dataset", "line_number": 122, "usage_type": "name"}, {"api_name": "bigdl.orca.data.tf.data.Dataset.from_tensor_slices", "line_number": 123, "usage_type": "call"}, {"api_name": "bigdl.orca.data.tf.data.Dataset", 
"line_number": 123, "usage_type": "name"}, {"api_name": "math.ceil", "line_number": 124, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 125, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 139, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 159, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.allclose", "line_number": 165, "usage_type": "call"}, {"api_name": "pytest.main", "line_number": 169, "usage_type": "call"}]} +{"seq_id": "621026308", "text": "from sympy import Matrix, sign\nfrom sympy.polys.polytools import LC\nfrom functools import partial\n\ncoefs = lambda n, v, p: [p.as_expr().coeff(v, k) for k in range(n)][::-1]\n\n\ndef SyHa(P, Q, j, v):\n p = P.degree(v)\n q = Q.degree(v)\n assert P.gens == Q.gens\n coef = partial(coefs, p + q - j, v)\n return Matrix([coef(P * v ** k) for k in range(q - j - 1, -1, -1)] + [coef(Q * v ** k) for k in range(0, p - j, 1)])\n\n\ndef sRes(P, Q, j=-1, v=0):\n if not v:\n v = P.gens[-1]\n p = P.degree(v)\n q = Q.degree(v)\n if p <= q:\n raise ValueError(\"P must have a greater degree than Q\")\n\n def sres(j):\n n = p + q - 2 * j\n if j == p:\n return sign(LC(P, v))\n return (SyHa(P, Q, j, v)[:, :n]).det()\n\n if j == -1:\n return [sres(j) for j in range(p, -1, -1)]\n\n return sres(j)\n\ndef trunc(Q, v):\n from sympy.polys.polyerrors import PolynomialError\n try:\n q = Q.degree(v)\n except PolynomialError:\n q = 0\n if not Q:\n return []\n elif q == 0:\n return [Q]\n else:\n ts = [Q]\n ts.extend(trunc(truncd(q-1, v, Q), v))\n return ts\n\ndef truncd(d, v, Q):\n assert d <= Q.degree(v)\n from sympy import Poly\n cs = coefs(d+1, v, Q)\n cs.reverse()\n p = sum(c*v**k for k,c in enumerate(cs))\n return Poly(p) if p else Poly(p, v)\n\n\ndef elim(pols, v):\n deg = lambda p: p.degree(v)\n els = []\n for p in pols:\n if deg(p) < 2: continue\n for r in trunc(p, v):\n for j in range(deg(r) - 1):\n els.append(sRes(r, r.diff(v), j))\n\n tps = truncs(pols)\n for r in tps:\n for s in tps:\n if deg(r) > deg(s):\n for j in range(deg(s)):\n els.append(sRes(r, s, j))\n elif deg(s) > deg(r):\n for j in range(deg(r)):\n els.append(sRes(s, r, j))\n else:\n rb = LC(s, v) * r - LC(R, v) * s\n for j in range(deg(rb)):\n els.append(sRes(s, rb, j))\n\n for r in tps:\n els.append(LC(r, v))\n\n return els\n\n\nif __name__ == '__main__':\n from sympy import Poly\n\n P = Poly('x*(x^3 - x)')\n Q = Poly('x^3 + x')\n\n print(sRes(P, Q))\n\n P = Poly('x^2 + y^2 + z^2 -1')\n x, y, z = P.gens\n P1 = P.diff(z)\n print(SyHa(P, P1, 0, z)) # from [SPR, example 5.17]\n for j in range(2):\n print(j, sRes(P, P1, j).simplify())\n\n Q = Poly('x^2 + y^2 -1')\n x, y = Q.gens\n Q1 = Q.diff(y)\n print(SyHa(Q, Q1, 0, y))\n\n P = Poly('x^2 + y^2 + z^2 -1')\n x, y, z = P.gens\n\n for j in range(2):\n print(j, sRes(Q, Q1, j).simplify())\n\n print(elim([P], z))\n", "sub_path": "sylverster.py", "file_name": "sylverster.py", "file_ext": "py", "file_size_in_byte": 2692, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "functools.partial", "line_number": 12, "usage_type": "call"}, {"api_name": "sympy.Matrix", "line_number": 13, "usage_type": "call"}, {"api_name": "sympy.sign", "line_number": 27, "usage_type": "call"}, {"api_name": "sympy.polys.polytools.LC", 
"line_number": 27, "usage_type": "call"}, {"api_name": "sympy.polys.polyerrors.PolynomialError", "line_number": 39, "usage_type": "name"}, {"api_name": "sympy.Poly", "line_number": 56, "usage_type": "call"}, {"api_name": "sympy.polys.polytools.LC", "line_number": 78, "usage_type": "call"}, {"api_name": "sympy.polys.polytools.LC", "line_number": 83, "usage_type": "call"}, {"api_name": "sympy.Poly", "line_number": 91, "usage_type": "call"}, {"api_name": "sympy.Poly", "line_number": 92, "usage_type": "call"}, {"api_name": "sympy.Poly", "line_number": 96, "usage_type": "call"}, {"api_name": "sympy.Poly", "line_number": 103, "usage_type": "call"}, {"api_name": "sympy.Poly", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "409106186", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat May 19 11:49:13 2018\r\n\r\n@author: rickarhayd\r\n\"\"\"\r\n\r\n### Hayden Rickard ###\r\n### GISC405 - Python programming and databases ### \r\n### Victoria University ###\r\n\r\n### Smart Ideas Project: Finding, Batch Processing, and Sharing of Shapefiles & Feature Classes ###\r\n\r\n### This script will convert all Feature Classes/Shapefiles in a given folder to the other file type, storing them in a new folder/GDB ###\r\n\r\n#%% Importing Modules\r\n\r\nimport arcpy as ap\r\nimport os\r\nimport shutil\r\nfrom datetime import date\r\n\r\n#%% Set up ArcGIS environment settings\r\n \r\nap.env.overwriteOutput = \"TRUE\"\r\n\r\n#%% Setting up Parameters\r\n\r\nindat = ap.GetParameterAsText(0)\r\nmaind = ap.GetParameterAsText(1)\r\noutname = ap.GetParameterAsText(2)\r\nconvertToShp = ap.GetParameter(3)\r\n\r\n#%% Setting up Working Directory\r\n\r\nos.chdir(maind)\r\nwd = os.getcwd()\r\n\r\n#%% Create Output Folder (if converting to Shapefiles) or GDB (if converting to Feature Classes)\r\n### if it already exists delete it and create a new one\r\n\r\nap.env.workspace = wd\r\n\r\nif convertToShp == True:\r\n if ap.Exists(outname) == True:\r\n ap.AddMessage(\"Folder {0} already exists - deleting it and creating a new one\".format(outname))\r\n shutil.rmtree(outname, ignore_errors=True)\r\n outwd = ap.CreateFolder_management(wd, outname)\r\n ap.AddMessage(\"Folder {0} created\".format(outname))\r\n else:\r\n ap.AddMessage(\"Folder {0} doesn't exists, creating it\".format(outname))\r\n outwd = ap.CreateFolder_management(wd, outname)\r\n ap.AddMessage(\"Folder {0} created\".format(outname))\r\nelse:\r\n if ap.Exists(outname + \".gdb\") == True:\r\n ap.AddMessage(\"Geodatabase {0} exists - deleting it and creating a new one\".format(outname))\r\n shutil.rmtree(wd + outname + \".gdb\", ignore_errors=True)\r\n outwd = ap.CreateFileGDB_management(wd, outname+\".gdb\")\r\n ap.AddMessage(\"Geodatabase {0} created\".format(outname))\r\n else:\r\n ap.AddMessage(\"Geodatabase {0} doesn't exist in \".format(outname)+wd+\"\\\\\"+str(outname))\r\n outwd = ap.CreateFileGDB_management(wd, outname+\".gdb\")\r\n ap.AddMessage(\"Geodatabase {0} created\".format(outname))\r\n \r\n#%% Convert the data to Shapefiles/Feature Classes depending on the original file type, storing them in the created GDB\r\n\r\nap.env.workspace = indat\r\n\r\nif convertToShp == True:\r\n for FC in ap.ListFeatureClasses():\r\n ap.FeatureClassToFeatureClass_conversion(FC, str(outwd),(str(FC)+\"_\"+(str(date.today().strftime('%d_%m_%Y')))+\".shp\"))\r\n ap.AddMessage(\"{0} has been converted to a Shapefile and stored in {1}\".format(str(FC),str(outwd)))\r\nelse:\r\n for FC in ap.ListFeatureClasses():\r\n FCname = (str(FC)[:-4])\r\n 
ap.FeatureClassToFeatureClass_conversion(FC, str(outwd),(FCname+\"_\"+(str(date.today().strftime('%d_%m_%Y')))))\r\n ap.AddMessage(\"{0} has been converted to a Feature Class and stored in {1}\".format(FCname,str(outwd)))\r\n", "sub_path": "SmartIdeas_convert.py", "file_name": "SmartIdeas_convert.py", "file_ext": "py", "file_size_in_byte": 2965, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "arcpy.env", "line_number": 25, "usage_type": "attribute"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 29, "usage_type": "call"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 30, "usage_type": "call"}, {"api_name": "arcpy.GetParameterAsText", "line_number": 31, "usage_type": "call"}, {"api_name": "arcpy.GetParameter", "line_number": 32, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 37, "usage_type": "call"}, {"api_name": "arcpy.env", "line_number": 42, "usage_type": "attribute"}, {"api_name": "arcpy.Exists", "line_number": 45, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 46, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 47, "usage_type": "call"}, {"api_name": "arcpy.CreateFolder_management", "line_number": 48, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 49, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 51, "usage_type": "call"}, {"api_name": "arcpy.CreateFolder_management", "line_number": 52, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 53, "usage_type": "call"}, {"api_name": "arcpy.Exists", "line_number": 55, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 56, "usage_type": "call"}, {"api_name": "shutil.rmtree", "line_number": 57, "usage_type": "call"}, {"api_name": "arcpy.CreateFileGDB_management", "line_number": 58, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 59, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 61, "usage_type": "call"}, {"api_name": "arcpy.CreateFileGDB_management", "line_number": 62, "usage_type": "call"}, {"api_name": "arcpy.AddMessage", "line_number": 63, "usage_type": "call"}, {"api_name": "arcpy.env", "line_number": 67, "usage_type": "attribute"}, {"api_name": "arcpy.ListFeatureClasses", "line_number": 70, "usage_type": "call"}, {"api_name": "arcpy.FeatureClassToFeatureClass_conversion", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 71, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 71, "usage_type": "name"}, {"api_name": "arcpy.AddMessage", "line_number": 72, "usage_type": "call"}, {"api_name": "arcpy.ListFeatureClasses", "line_number": 74, "usage_type": "call"}, {"api_name": "arcpy.FeatureClassToFeatureClass_conversion", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 76, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 76, "usage_type": "name"}, {"api_name": "arcpy.AddMessage", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "199637167", "text": "import tempfile\n\nfrom banditpylib.arms import BernoulliArm\nfrom banditpylib.bandits import OrdinaryBandit\nfrom banditpylib.learners.ordinary_learner import EpsGreedy\nfrom .single_player import SinglePlayerProtocol\n\n\nclass TestSinglePlayer:\n \"\"\"Test single player protocol\"\"\"\n\n def test_simple_run(self):\n 
means = [0.3, 0.5, 0.7]\n arms = [BernoulliArm(mean) for mean in means]\n ordinary_bandit = OrdinaryBandit(arms)\n eps_greedy_learner = EpsGreedy(arm_num=3, horizon=10)\n single_player = SinglePlayerProtocol(bandit=ordinary_bandit,\n learners=[eps_greedy_learner])\n temp_file = tempfile.NamedTemporaryFile()\n single_player.play(trials=3, output_filename=temp_file.name)\n with open(temp_file.name, 'r') as f:\n # check number of records is 3\n lines = f.readlines()\n assert len(lines) == 3\n", "sub_path": "banditpylib/protocols/single_player_test.py", "file_name": "single_player_test.py", "file_ext": "py", "file_size_in_byte": 868, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "banditpylib.arms.BernoulliArm", "line_number": 14, "usage_type": "call"}, {"api_name": "banditpylib.bandits.OrdinaryBandit", "line_number": 15, "usage_type": "call"}, {"api_name": "banditpylib.learners.ordinary_learner.EpsGreedy", "line_number": 16, "usage_type": "call"}, {"api_name": "single_player.SinglePlayerProtocol", "line_number": 17, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 19, "usage_type": "call"}, {"api_name": "single_player.play", "line_number": 20, "usage_type": "call"}]} +{"seq_id": "101957173", "text": "#!/usr/bin/env python\n\n\"\"\"\nSimple tornado app to list and read files.\n\nStart server:\npython svc.py -port=8001\n\n(Default port is 8000)\n\nUsing service\n\n/\nList:\n/data/path/to/dir\nRead:\n/data/path/to/file\nFilter:\n/data/path/to/file?rows=id1,id2&cols=colid1,colid2\n\nAll services return status_code 500 if there is any error.\n\n\"\"\"\nimport logging\n\nimport tornado.ioloop\nfrom tornado.options import define, options\nimport tornado.web\nimport json\n\nfrom oauth.google import GoogleOAuth2Handler, GoogleSignoutHandler\nfrom oauth.decorator import OAuthenticated\nfrom datastores.mongo import MongoDbQueryHandler\nfrom datastores.localfiles import LocalFileHandler\nfrom storage.mongo import MongoDbStorageHandler, GetUserinfo\nfrom storage.collections import MongoDbCollectionsHandler\nfrom scc.github import GitWebHookHandler\n\nfrom tabix.tabix_lookup import TabixLookupHandler\nfrom tabix.seqpeek_data_lookup import SeqPeekDataHandler\nfrom tabix.variant_summary_handler import VariantSummaryHandler\n\ndefine(\"data_path\", default=\"../..\", help=\"Path to data files\")\ndefine(\"port\", default=8000, help=\"run on the given port\", type=int)\ndefine(\"client_host\", default=\"http://localhost:8000\", help=\"Client URL for Google OAuth2\")\ndefine(\"client_id\", help=\"Client ID for Google OAuth2\")\ndefine(\"client_secret\", help=\"Client Secrets for Google OAuth2\")\ndefine(\"config_file\", help=\"Path to config file\")\ndefine(\"config_file_json\", help=\"Path to JSON config file\")\ndefine(\"authorized_users\", default=[], help=\"List of authorized user emails\")\ndefine(\"mongo_storage_uri\", default=\"mongodb://localhost:27017\", help=\"MongoDB URI in the form mongodb://username:password@hostname:port\")\ndefine(\"mongo_storage_db\", default=\"storage_db\", help=\"MongoDB database name\")\n\ndefine(\"mongo_datastores\", default=[(\"ds\", \"mongodb://localhost:27017\")], help=\"Lookup MongoDB configurations\")\ndefine(\"mongo_rows_limit\", default=1000, type=int, help=\"Lookup MongoDB limit on rows returned from query\")\ndefine(\"case_sensitive_lookups\", default=[], help=\"List of database names to apply case sensitive lookups\")\ndefine(\"github_repo_api_url\", 
help=\"Link to repository api url (see examples/svc.config)\")\ndefine(\"github_project_root\", help=\"Local path to main repository branch\")\ndefine(\"github_branches_root\", help=\"Local path to top-level branches directory\")\ndefine(\"github_postproc_cmd\", help=\"Command-line to execute after checkout\")\ndefine(\"github_git_cmd\", help=\"Path to git executable\", default=\"git\")\ndefine(\"github_branches_json_path\", help=\"Path to publish branches json\", default=\".\")\n\ndefine(\"verbose\", default=False, type=bool, help=\"Enable verbose printouts\")\n\ndefine(\"tabix_executable\", default=\"tabix\", help=\"Tabix executable\")\ndefine(\"tabix_lookups\", default={}, help=\"Tabix lookups configurations\")\ndefine(\"seqpeek_data_lookups\", default={}, help=\"SeqPeek data lookups configurations\")\ndefine(\"variant_summary_sources\", default={}, help=\"Variant Summary configurations\")\n\nsettings = {\n \"debug\": True,\n \"cookie_secret\": \"not_a_big_secret\"\n}\n\nserver_settings = {\n \"xheaders\" : True,\n \"address\" : \"0.0.0.0\"\n}\n\nclass DataStoreConfiguration(object):\n def __init__(self, uri, case_sensitive_databases):\n self.set_uri(uri)\n self.case_sensitive_databases = frozenset(case_sensitive_databases)\n\n def get_uri(self):\n return self._uri\n\n def set_uri(self, uri):\n self._uri = uri\n\n def is_case_sensitive_database(self, database_name):\n return database_name in self.case_sensitive_databases\n\n uri = property(get_uri, set_uri)\n\n\nclass MainHandler(tornado.web.RequestHandler):\n def get(self):\n items = []\n items.append({ \"id\": \"data\", \"uri\": self.request.path + \"data\" })\n items.append({ \"id\": \"datastores\", \"uri\": self.request.path + \"datastores\" })\n self.write({\"items\": items})\n self.set_status(200)\n\nclass WhoamiHandler(tornado.web.RequestHandler):\n @OAuthenticated\n def get(self):\n userkey = self.get_secure_cookie(\"whoami\")\n\n google_provider = { \"id\": \"google\", \"label\": \"Google+\", \"active\": False, \"logo\": \"https://www.google.com/images/icons/ui/gprofile_button-64.png\" }\n if not userkey is None:\n user = GetUserinfo(userkey)\n if not user is None:\n google_provider[\"active\"] = True\n google_provider[\"user\"] = {}\n if \"id_token\" in user and \"email\" in user[\"id_token\"]: google_provider[\"user\"][\"email\"] = user[\"id_token\"][\"email\"]\n if \"profile\" in user:\n user_profile = user[\"profile\"]\n if \"name\" in user_profile: google_provider[\"user\"][\"fullname\"] = user_profile[\"name\"]\n if \"picture\" in user_profile: google_provider[\"user\"][\"pic\"] = user_profile[\"picture\"]\n if \"link\" in user_profile: google_provider[\"user\"][\"profileLink\"] = user_profile[\"link\"]\n\n self.write({\"providers\":[ google_provider ]})\n self.set_status(200)\n\nclass AuthProvidersHandler(tornado.web.RequestHandler):\n def get(self):\n google_provider = { \"id\": \"google\", \"label\": \"Google+\", \"active\": False, \"logo\": \"https://www.google.com/images/icons/ui/gprofile_button-64.png\" }\n self.write({\"providers\": [ google_provider ] })\n self.set_status(200)\n\n\ndef parse_datastore_configuration():\n datastore_map = {}\n for datastore_config in options.mongo_datastores:\n if (len(datastore_config) == 2):\n datastore_id, uri = datastore_config\n datastore_map[datastore_id] = DataStoreConfiguration(uri, [])\n elif (len(datastore_config) == 3):\n datastore_id, uri, case_sensitive_databases = datastore_config\n datastore_map[datastore_id] = DataStoreConfiguration(uri, case_sensitive_databases)\n 
else:\n logging.error(\"Invalid datastore config: \" + repr(datastore_config))\n\n return datastore_map\n\ndef parse_tabix_lookup_configuration():\n tabix_file_map = {}\n for tabix_id, config in options.tabix_lookups.iteritems():\n if len(config.keys()) == 0:\n logging.warn(\"Tabix lookup \\'\" + tabix_id + \"\\' disabled - empty configuration.\")\n else:\n tabix_file_map[tabix_id] = config\n logging.info(\"Tabix lookup \\'\" + tabix_id + \"\\' enabled.\")\n \n return tabix_file_map\n\n\ndef parse_seqpeek_data_configuration():\n seqpeek_data_map = {}\n for seqpeek_data_id, config in options.seqpeek_data_lookups.iteritems():\n if len(config.keys()) == 0:\n logging.warn(\"SeqPeek data lookup \\'\" + seqpeek_data_id + \"\\' disabled - empty configuration.\")\n else:\n seqpeek_data_map[seqpeek_data_id] = config\n logging.info(\"SeqPeek data lookup \\'\" + seqpeek_data_id + \"\\' enabled.\")\n\n return seqpeek_data_map\n\n\ndef parse_variant_summary_configuration():\n REQUIRED_KEYS = set(['tabix_executable', 'vcf_file', 'triotype_file', 'feature_matrix'])\n data_map = {}\n for data_id, config in options.variant_summary_sources.iteritems():\n if len(config.keys()) == 0:\n logging.warn(\"Variant Summary lookup \\'\" + data_id + \"\\' disabled - empty configuration.\")\n continue\n\n if REQUIRED_KEYS.issubset(config.keys()): # all required fields must be present\n data_map[data_id] = config\n logging.info(\"Variant Summary lookup \\'\" + data_id + \"\\' enabled.\")\n\n else:\n logging.warn(\"Variant Summary lookup \\'\" + data_id + \"\\' disabled - missing fields in configuration.\")\n\n return data_map\n\n\ndef main():\n options.parse_command_line()\n if not options.config_file is None:\n options.parse_config_file(options.config_file)\n options.parse_command_line()\n\n if options.client_secret:\n settings[\"cookie_secret\"] = options.client_secret\n\n logging.info(\"Starting Tornado web server on http://localhost:%s\" % options.port)\n logging.info(\"--data_path=%s\" % options.data_path)\n logging.info(\"--client_host=%s\" % options.client_host)\n logging.info(\"--authorized_users=%s\" % options.authorized_users)\n logging.info(\"--mongo_storage_uri=%s\" % options.mongo_storage_uri)\n logging.info(\"--mongo_storage_db=%s\" % options.mongo_storage_db)\n logging.info(\"--mongo_rows_limit=%s\" % options.mongo_rows_limit)\n\n if not options.config_file is None:\n logging.info(\"--config_file=%s\" % options.config_file)\n\n if not options.config_file_json is None:\n logging.info(\"--config_file_json=%s\" % options.config_file_json)\n\n if not options.github_repo_api_url is None:\n logging.info(\"--github_repo_api_url=%s\" % options.github_repo_api_url)\n logging.info(\"--github_project_root=%s\" % options.github_project_root)\n logging.info(\"--github_branches_root=%s\" % options.github_branches_root)\n logging.info(\"--github_postproc_cmd=%s\" % options.github_postproc_cmd)\n logging.info(\"--github_git_cmd=%s\" % options.github_git_cmd)\n logging.info(\"--github_branches_json_path=%s\" % options.github_branches_json_path)\n logging.info(\"Starting GitHub Web Hook at http://localhost:%s/gitWebHook\" % options.port)\n\n MongoDbQueryHandler.datastores = parse_datastore_configuration()\n\n if not options.config_file_json is None:\n MongoDbQueryHandler.datastores_config = json.load(open(options.config_file_json))\n\n TabixLookupHandler.tabix_file_map = parse_tabix_lookup_configuration()\n\n SeqPeekDataHandler.seqpeek_data_map = parse_seqpeek_data_configuration()\n\n VariantSummaryHandler.data_map = 
parse_variant_summary_configuration()\n\n application = tornado.web.Application([\n (r\"/\", MainHandler),\n (r\"/auth/signin/google\", GoogleOAuth2Handler),\n (r\"/auth/signin/google/oauth2_callback\", GoogleOAuth2Handler),\n (r\"/auth/signout/google\", GoogleSignoutHandler),\n (r\"/auth/whoami\", WhoamiHandler),\n (r\"/auth/providers\", AuthProvidersHandler),\n (r\"/datastores\", MongoDbQueryHandler),\n (r\"/datastores/(.*)\", MongoDbQueryHandler),\n (r\"/data?(.*)\", LocalFileHandler),\n (r\"/storage/(.*)\", MongoDbStorageHandler),\n (r\"/collections/(.*)\", MongoDbCollectionsHandler),\n (r\"/tabix/(\\w+)/(X|Y|M|\\d{1,2})/(\\d+)\", TabixLookupHandler),\n (r\"/tabix/(\\w+)/(X|Y|M|\\d{1,2})/(\\d+)/(\\d+)\", TabixLookupHandler),\n (r\"/seqpeek_data/(.*)\", SeqPeekDataHandler),\n (r\"/variant_summary/(.*)\", VariantSummaryHandler),\n (r\"/gitWebHook?(.*)\", GitWebHookHandler)\n ], **settings)\n application.listen(options.port, **server_settings)\n tornado.ioloop.IOLoop.instance().start()\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "svc.py", "file_name": "svc.py", "file_ext": "py", "file_size_in_byte": 10612, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "tornado.options.define", "line_number": 43, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 44, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 45, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 46, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 47, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 48, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 49, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 50, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 51, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 52, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 54, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 55, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 56, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 57, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 58, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 59, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 60, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 61, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 62, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 64, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 66, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 67, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 68, "usage_type": "call"}, {"api_name": "tornado.options.define", "line_number": 69, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 98, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 106, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 106, "usage_type": "name"}, {"api_name": "storage.mongo.GetUserinfo", 
"line_number": 113, "usage_type": "call"}, {"api_name": "oauth.decorator.OAuthenticated", "line_number": 107, "usage_type": "name"}, {"api_name": "tornado.ioloop.web", "line_number": 127, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 127, "usage_type": "name"}, {"api_name": "tornado.options.options.mongo_datastores", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 136, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 144, "usage_type": "call"}, {"api_name": "tornado.options.options.tabix_lookups.iteritems", "line_number": 150, "usage_type": "call"}, {"api_name": "tornado.options.options.tabix_lookups", "line_number": 150, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 150, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 152, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 155, "usage_type": "call"}, {"api_name": "tornado.options.options.seqpeek_data_lookups.iteritems", "line_number": 162, "usage_type": "call"}, {"api_name": "tornado.options.options.seqpeek_data_lookups", "line_number": 162, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 162, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 164, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 167, "usage_type": "call"}, {"api_name": "tornado.options.options.variant_summary_sources.iteritems", "line_number": 175, "usage_type": "call"}, {"api_name": "tornado.options.options.variant_summary_sources", "line_number": 175, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 175, "usage_type": "name"}, {"api_name": "logging.warn", "line_number": 177, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 182, "usage_type": "call"}, {"api_name": "logging.warn", "line_number": 185, "usage_type": "call"}, {"api_name": "tornado.options.options.parse_command_line", "line_number": 191, "usage_type": "call"}, {"api_name": "tornado.options.options", "line_number": 191, "usage_type": "name"}, {"api_name": "tornado.options.options.config_file", "line_number": 192, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 192, "usage_type": "name"}, {"api_name": "tornado.options.options.parse_config_file", "line_number": 193, "usage_type": "call"}, {"api_name": "tornado.options.options", "line_number": 193, "usage_type": "name"}, {"api_name": "tornado.options.options.config_file", "line_number": 193, "usage_type": "attribute"}, {"api_name": "tornado.options.options.parse_command_line", "line_number": 194, "usage_type": "call"}, {"api_name": "tornado.options.options", "line_number": 194, "usage_type": "name"}, {"api_name": "tornado.options.options.client_secret", "line_number": 196, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 196, "usage_type": "name"}, {"api_name": "tornado.options.options.client_secret", "line_number": 197, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 197, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 199, "usage_type": "call"}, {"api_name": "tornado.options.options.port", "line_number": 199, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 199, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 200, "usage_type": "call"}, {"api_name": "tornado.options.options.data_path", 
"line_number": 200, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 200, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 201, "usage_type": "call"}, {"api_name": "tornado.options.options.client_host", "line_number": 201, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 201, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 202, "usage_type": "call"}, {"api_name": "tornado.options.options.authorized_users", "line_number": 202, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 202, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 203, "usage_type": "call"}, {"api_name": "tornado.options.options.mongo_storage_uri", "line_number": 203, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 203, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 204, "usage_type": "call"}, {"api_name": "tornado.options.options.mongo_storage_db", "line_number": 204, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 204, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 205, "usage_type": "call"}, {"api_name": "tornado.options.options.mongo_rows_limit", "line_number": 205, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 205, "usage_type": "name"}, {"api_name": "tornado.options.options.config_file", "line_number": 207, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 207, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 208, "usage_type": "call"}, {"api_name": "tornado.options.options.config_file", "line_number": 208, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 208, "usage_type": "name"}, {"api_name": "tornado.options.options.config_file_json", "line_number": 210, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 210, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 211, "usage_type": "call"}, {"api_name": "tornado.options.options.config_file_json", "line_number": 211, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 211, "usage_type": "name"}, {"api_name": "tornado.options.options.github_repo_api_url", "line_number": 213, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 213, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 214, "usage_type": "call"}, {"api_name": "tornado.options.options.github_repo_api_url", "line_number": 214, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 214, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 215, "usage_type": "call"}, {"api_name": "tornado.options.options.github_project_root", "line_number": 215, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 215, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 216, "usage_type": "call"}, {"api_name": "tornado.options.options.github_branches_root", "line_number": 216, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 216, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 217, "usage_type": "call"}, {"api_name": "tornado.options.options.github_postproc_cmd", "line_number": 217, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 217, "usage_type": 
"name"}, {"api_name": "logging.info", "line_number": 218, "usage_type": "call"}, {"api_name": "tornado.options.options.github_git_cmd", "line_number": 218, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 218, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 219, "usage_type": "call"}, {"api_name": "tornado.options.options.github_branches_json_path", "line_number": 219, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 219, "usage_type": "name"}, {"api_name": "logging.info", "line_number": 220, "usage_type": "call"}, {"api_name": "tornado.options.options.port", "line_number": 220, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 220, "usage_type": "name"}, {"api_name": "datastores.mongo.MongoDbQueryHandler.datastores", "line_number": 222, "usage_type": "attribute"}, {"api_name": "datastores.mongo.MongoDbQueryHandler", "line_number": 222, "usage_type": "name"}, {"api_name": "tornado.options.options.config_file_json", "line_number": 224, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 224, "usage_type": "name"}, {"api_name": "datastores.mongo.MongoDbQueryHandler.datastores_config", "line_number": 225, "usage_type": "attribute"}, {"api_name": "datastores.mongo.MongoDbQueryHandler", "line_number": 225, "usage_type": "name"}, {"api_name": "json.load", "line_number": 225, "usage_type": "call"}, {"api_name": "tornado.options.options.config_file_json", "line_number": 225, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 225, "usage_type": "name"}, {"api_name": "tabix.tabix_lookup.TabixLookupHandler.tabix_file_map", "line_number": 227, "usage_type": "attribute"}, {"api_name": "tabix.tabix_lookup.TabixLookupHandler", "line_number": 227, "usage_type": "name"}, {"api_name": "tabix.seqpeek_data_lookup.SeqPeekDataHandler.seqpeek_data_map", "line_number": 229, "usage_type": "attribute"}, {"api_name": "tabix.seqpeek_data_lookup.SeqPeekDataHandler", "line_number": 229, "usage_type": "name"}, {"api_name": "tabix.variant_summary_handler.VariantSummaryHandler.data_map", "line_number": 231, "usage_type": "attribute"}, {"api_name": "tabix.variant_summary_handler.VariantSummaryHandler", "line_number": 231, "usage_type": "name"}, {"api_name": "tornado.ioloop.web.Application", "line_number": 233, "usage_type": "call"}, {"api_name": "tornado.ioloop.web", "line_number": 233, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 233, "usage_type": "name"}, {"api_name": "oauth.google.GoogleOAuth2Handler", "line_number": 235, "usage_type": "name"}, {"api_name": "oauth.google.GoogleOAuth2Handler", "line_number": 236, "usage_type": "name"}, {"api_name": "oauth.google.GoogleSignoutHandler", "line_number": 237, "usage_type": "name"}, {"api_name": "datastores.mongo.MongoDbQueryHandler", "line_number": 240, "usage_type": "name"}, {"api_name": "datastores.mongo.MongoDbQueryHandler", "line_number": 241, "usage_type": "name"}, {"api_name": "datastores.localfiles.LocalFileHandler", "line_number": 242, "usage_type": "name"}, {"api_name": "storage.mongo.MongoDbStorageHandler", "line_number": 243, "usage_type": "name"}, {"api_name": "storage.collections.MongoDbCollectionsHandler", "line_number": 244, "usage_type": "name"}, {"api_name": "tabix.tabix_lookup.TabixLookupHandler", "line_number": 245, "usage_type": "name"}, {"api_name": "tabix.tabix_lookup.TabixLookupHandler", "line_number": 246, "usage_type": "name"}, {"api_name": 
"tabix.seqpeek_data_lookup.SeqPeekDataHandler", "line_number": 247, "usage_type": "name"}, {"api_name": "tabix.variant_summary_handler.VariantSummaryHandler", "line_number": 248, "usage_type": "name"}, {"api_name": "scc.github.GitWebHookHandler", "line_number": 249, "usage_type": "name"}, {"api_name": "tornado.options.options.port", "line_number": 251, "usage_type": "attribute"}, {"api_name": "tornado.options.options", "line_number": 251, "usage_type": "name"}, {"api_name": "tornado.ioloop.ioloop.IOLoop.instance", "line_number": 252, "usage_type": "call"}, {"api_name": "tornado.ioloop.ioloop", "line_number": 252, "usage_type": "attribute"}, {"api_name": "tornado.ioloop", "line_number": 252, "usage_type": "name"}]} +{"seq_id": "138526829", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import make_classification\n\n\ndef sigmoid(x):\n # sigmoid function\n h = 1.0 / (1.0 + np.exp(-x))\n return h\n\n\ndef create_dataset(size):\n X, y = make_classification(n_samples=size, n_classes=2, n_features=2,\n n_redundant=0, n_informative=2,\n random_state=1, n_clusters_per_class=1)\n rng = np.random.RandomState(2)\n X += 2 * rng.uniform(size=X.shape)\n ones = np.ones([size, 1])\n X = np.hstack([ones, X])\n\n return X, y\n\n\ndef gradient_descent(X, y, learning_rate, size, num_iterations):\n w = np.mat(np.random.rand(3, 1))\n y = y.reshape(size, 1)\n for i in range(num_iterations):\n h = sigmoid(np.dot(X, w))\n dw = np.dot(X.T, (h - y))\n w = w - learning_rate * dw\n\n return w\n\n\ndef run():\n learning_rate = 0.1\n num_iterations = 100\n size = 100\n\n X, y = create_dataset(size)\n w = gradient_descent(X, y, learning_rate, size, num_iterations)\n\n # boundary function\n # 0 = w0 + w1x1 + w2x2\n w0 = w[0, 0]\n w1 = w[1, 0]\n w2 = w[2, 0]\n\n x1 = np.linspace(-5, 5)\n x2 = - w0 / w2 - w1 / w2 * x1\n\n plt.scatter(X[:, 1], X[:, 2], c=y, cmap=\"RdBu\")\n plt.plot(x1,x2)\n plt.xlim((-5, 5))\n plt.ylim((-5, 5))\n plt.show()\n print(w)\n\n\nif __name__ == '__main__':\n run()\n", "sub_path": "logistic_regression/logistic_regression.py", "file_name": "logistic_regression.py", "file_ext": "py", "file_size_in_byte": 1385, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.exp", "line_number": 8, "usage_type": "call"}, {"api_name": "sklearn.datasets.make_classification", "line_number": 13, "usage_type": "call"}, {"api_name": "numpy.random.RandomState", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 16, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.mat", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 28, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 53, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 54, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}]} +{"seq_id": "417302794", "text": "# -*- coding: utf-8 -*-\n\nfrom mrjob.job import MRJob\nimport string \n\nclass MRInvertedIndex(MRJob):\n\n def mapper(self, _, line):\n for x in string.punctuation:\n line = line.replace(x,' ')\n words = line.split()\n line_number = words[0]\n for word in words[1:]:\n yield (word.lower(), line_number)\n \n def reducer(self, word, lines):\n line_index = []\n for l in lines:\n if not l in line_index:\n line_index.append(l)\n yield word, line_index\n \nif __name__ == '__main__':\n MRInvertedIndex.run()\n", "sub_path": "tema2_Paralela/060-inverted_index.py", "file_name": "060-inverted_index.py", "file_ext": "py", "file_size_in_byte": 601, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "mrjob.job.MRJob", "line_number": 6, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 9, "usage_type": "attribute"}]} +{"seq_id": "630270147", "text": "import time\r\n\r\nfrom threading import Thread\r\n\r\nimport pytest\r\n\r\nfrom pydantic import BaseModel\r\n\r\nfrom airport.utils.cache.heap import Heap\r\nfrom airport.utils.cache.heap import HeapClosed\r\nfrom airport.utils.cache.heap import HeapObjectNotFound\r\n\r\n\r\nclass HeapTestObject(BaseModel):\r\n name: str\r\n value: int\r\n\r\n\r\ndef make_heap_obj(name: str, value: int) -> HeapTestObject:\r\n return HeapTestObject(name=name, value=value)\r\n\r\n\r\ndef heap_key_func(obj: HeapTestObject):\r\n return obj.name\r\n\r\n\r\ndef compare_ints(value1: HeapTestObject, value2: HeapTestObject) -> bool:\r\n return value1.value < value2.value\r\n\r\n\r\n@pytest.fixture\r\ndef heap() -> Heap[HeapTestObject]:\r\n return Heap.new(heap_key_func, compare_ints)\r\n\r\n\r\ndef test_heap_basic(heap: Heap[HeapTestObject]):\r\n\r\n amount = 500\r\n\r\n def task1():\r\n for i in reversed(range(amount)):\r\n heap.add(make_heap_obj(f\"a{i}\", i))\r\n\r\n thread1 = Thread(target=task1)\r\n\r\n def task2():\r\n for i in range(amount):\r\n heap.add(make_heap_obj(f\"b{i}\", i))\r\n\r\n thread2 = Thread(target=task2)\r\n\r\n thread1.start()\r\n thread2.start()\r\n thread1.join()\r\n thread2.join()\r\n\r\n prev_num = 0\r\n for i in range(amount * 2):\r\n obj = heap.pop()\r\n num = obj.value\r\n if prev_num > num:\r\n pytest.fail(f\"got {obj} out of order, last was {prev_num}\")\r\n\r\n prev_num = num\r\n\r\n\r\ndef test_heap_add(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n heap.add(make_heap_obj(\"baz\", 11))\r\n heap.add(make_heap_obj(\"zab\", 30))\r\n heap.add(make_heap_obj(\"foo\", 13))\r\n\r\n assert heap.pop().value == 1\r\n assert heap.pop().value == 11\r\n\r\n with pytest.raises(HeapObjectNotFound):\r\n # baz already pop\r\n heap.delete(make_heap_obj(\"baz\", 11))\r\n heap.add(make_heap_obj(\"foo\", 14)) # update foo\r\n\r\n assert heap.pop().value == 14\r\n assert heap.pop().value == 30\r\n\r\n\r\ndef test_heap_bulk_add(heap: Heap[HeapTestObject]):\r\n amount = 500\r\n\r\n def task():\r\n datas = [make_heap_obj(f\"a{i}\", i) for i in reversed(range(amount))]\r\n 
heap.bulk_add(datas)\r\n\r\n Thread(target=task).start()\r\n\r\n prev_num = -1\r\n for i in range(amount):\r\n obj = heap.pop()\r\n if prev_num >= obj.value:\r\n pytest.fail(f\"got {obj} out of order, last was {prev_num}\")\r\n\r\n\r\ndef test_heap_empty_pop(heap: Heap[HeapTestObject]):\r\n def task():\r\n time.sleep(1)\r\n heap.close()\r\n\r\n Thread(target=task).start()\r\n\r\n with pytest.raises(HeapClosed):\r\n heap.pop()\r\n\r\n\r\ndef test_heap_add_if_not_present(heap: Heap[HeapTestObject]):\r\n heap.add_if_not_present(make_heap_obj(\"foo\", 10))\r\n heap.add_if_not_present(make_heap_obj(\"bar\", 1))\r\n heap.add_if_not_present(make_heap_obj(\"baz\", 11))\r\n heap.add_if_not_present(make_heap_obj(\"zab\", 30))\r\n heap.add_if_not_present(make_heap_obj(\"foo\", 13)) # update\r\n\r\n assert len(heap.data.items) == 4\r\n assert heap.data.items[\"foo\"].obj.value == 10\r\n\r\n assert heap.pop().value == 1\r\n assert heap.pop().value == 10\r\n\r\n heap.add_if_not_present(make_heap_obj(\"bar\", 14))\r\n assert heap.pop().value == 11\r\n assert heap.pop().value == 14\r\n\r\n\r\ndef test_heap_delete(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n heap.add(make_heap_obj(\"bal\", 31))\r\n heap.add(make_heap_obj(\"baz\", 11))\r\n\r\n heap.delete(make_heap_obj(\"bar\", 200))\r\n assert heap.pop().value == 10\r\n\r\n heap.add(make_heap_obj(\"zab\", 30))\r\n heap.add(make_heap_obj(\"faz\", 30))\r\n data_len = len(heap.data)\r\n\r\n with pytest.raises(HeapObjectNotFound):\r\n heap.delete(make_heap_obj(\"non-existent\", 10))\r\n assert len(heap.data) == data_len\r\n\r\n heap.delete(make_heap_obj(\"bal\", 31))\r\n heap.delete(make_heap_obj(\"zab\", 30))\r\n\r\n assert heap.pop().value == 11\r\n assert heap.pop().value == 30\r\n\r\n assert len(heap.data) == 0\r\n\r\n\r\ndef test_heap_update(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n heap.add(make_heap_obj(\"bal\", 31))\r\n heap.add(make_heap_obj(\"baz\", 11))\r\n\r\n heap.update(make_heap_obj(\"baz\", 0))\r\n\r\n assert heap.data.queue[0] == \"baz\" and heap.data.items[\"baz\"].index == 0\r\n assert heap.pop().value == 0\r\n\r\n heap.update(make_heap_obj(\"bar\", 100))\r\n assert heap.data.queue[0] == \"foo\" and heap.data.items[\"foo\"].index == 0\r\n\r\n\r\ndef test_heap_get(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n heap.add(make_heap_obj(\"bal\", 31))\r\n heap.add(make_heap_obj(\"baz\", 11))\r\n\r\n obj = heap.get(make_heap_obj(\"baz\", 0))\r\n assert obj is not None and obj.value == 11\r\n\r\n obj = heap.get(make_heap_obj(\"non-existing\", 0))\r\n assert obj is None\r\n\r\n\r\ndef test_heap_get_by_key(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n heap.add(make_heap_obj(\"bal\", 31))\r\n heap.add(make_heap_obj(\"baz\", 11))\r\n\r\n obj = heap.get_by_key(\"baz\")\r\n assert obj is not None and obj.value == 11\r\n\r\n obj = heap.get_by_key(\"non-existing\")\r\n assert obj is None\r\n\r\n\r\ndef test_heap_close(heap: Heap[HeapTestObject]):\r\n heap.add(make_heap_obj(\"foo\", 10))\r\n heap.add(make_heap_obj(\"bar\", 1))\r\n\r\n assert not heap.closed, \"didn't expect heap to be closed\"\r\n\r\n heap.close()\r\n assert heap.closed, \"expect heap to be closed\"\r\n\r\n\r\ndef test_heap_list(heap: Heap[HeapTestObject]):\r\n heap_list = heap.list()\r\n assert len(heap_list) == 
0\r\n\r\n items = {\"foo\": 10, \"bar\": 1, \"bal\": 31, \"baz\": 11, \"faz\": 30}\r\n\r\n for k, v in items.items():\r\n heap.add(make_heap_obj(k, v))\r\n\r\n assert len(heap.list()) == len(items)\r\n\r\n for obj in heap.list():\r\n assert items[obj.name] == obj.value\r\n\r\n\r\ndef test_heap_list_keys(heap: Heap[HeapTestObject]):\r\n list_keys = heap.list_keys()\r\n assert len(list_keys) == 0\r\n\r\n items = {\"foo\": 10, \"bar\": 1, \"bal\": 31, \"baz\": 11, \"faz\": 30}\r\n for k, v in items.items():\r\n heap.add(make_heap_obj(k, v))\r\n\r\n assert len(heap.list_keys()) == len(items)\r\n\r\n for key in heap.list_keys():\r\n assert items.get(key)\r\n\r\n\r\ndef test_heap_after_close(heap: Heap[HeapTestObject]):\r\n heap.close()\r\n\r\n with pytest.raises(HeapClosed):\r\n heap.add(make_heap_obj(\"test\", 1))\r\n\r\n with pytest.raises(HeapClosed):\r\n heap.add_if_not_present(make_heap_obj(\"test\", 1))\r\n\r\n with pytest.raises(HeapClosed):\r\n heap.bulk_add([make_heap_obj(\"test\", 1)])\r\n", "sub_path": "tests/utils/cache/test_heap.py", "file_name": "test_heap.py", "file_ext": "py", "file_size_in_byte": 6624, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pydantic.BaseModel", "line_number": 14, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap.new", "line_number": 33, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 33, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 31, "usage_type": "attribute"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 32, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 36, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 44, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 50, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 62, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 67, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 77, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapObjectNotFound", "line_number": 77, "usage_type": "argument"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 86, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 93, "usage_type": "call"}, {"api_name": "pytest.fail", "line_number": 99, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 102, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 104, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 107, "usage_type": "call"}, {"api_name": "pytest.raises", "line_number": 109, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapClosed", "line_number": 109, "usage_type": "argument"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 113, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 131, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 144, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapObjectNotFound", "line_number": 144, "usage_type": "argument"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 157, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 172, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 185, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", 
"line_number": 198, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 208, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 223, "usage_type": "name"}, {"api_name": "airport.utils.cache.heap.Heap", "line_number": 237, "usage_type": "name"}, {"api_name": "pytest.raises", "line_number": 240, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapClosed", "line_number": 240, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 243, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapClosed", "line_number": 243, "usage_type": "argument"}, {"api_name": "pytest.raises", "line_number": 246, "usage_type": "call"}, {"api_name": "airport.utils.cache.heap.HeapClosed", "line_number": 246, "usage_type": "argument"}]} +{"seq_id": "35027223", "text": "import multiprocessing\nimport os\nimport random\nimport math\n\nimport cv2\nimport keras.backend as K\nfrom keras.utils import to_categorical\n\nimport numpy as np\nfrom tensorflow.python.client import device_lib\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom matplotlib import gridspec\n\ndef set_npy_weights(weights_path, model):\n npy_weights_path = os.path.join(\"./segmentation_models/pspnet_temp/pretrained_weights\", \"npy\", weights_path + \".npy\")\n print(npy_weights_path)\n json_path = os.path.join(\"./segmentation_models/pspnet_temp/pretrained_weights\", \"keras\", weights_path + \".json\")\n print(json_path)\n h5_path = os.path.join(\"./segmentation_models/pspnet_temp/pretrained_weights\", \"keras\", weights_path + \".h5\")\n print(h5_path)\n\n print(\"Importing weights from %s\" % npy_weights_path)\n weights = np.load(npy_weights_path,encoding=\"latin1\").item()\n\n for layer in model.layers:\n print(layer.name)\n if layer.name[:4] == 'conv' and layer.name[-2:] == 'bn':\n mean = weights[layer.name]['mean'].reshape(-1)\n variance = weights[layer.name]['variance'].reshape(-1)\n scale = weights[layer.name]['scale'].reshape(-1)\n offset = weights[layer.name]['offset'].reshape(-1)\n model.get_layer(layer.name).set_weights([scale, offset, mean, variance])\n elif layer.name[:4] == 'conv' and not layer.name[-4:] == 'relu':\n try:\n weight = weights[layer.name]['weights']\n model.get_layer(layer.name).set_weights([weight])\n except Exception as err:\n try:\n biases = weights[layer.name]['biases']\n model.get_layer(layer.name).set_weights([weight,\n biases])\n except Exception as err2:\n print(err2)\n if layer.name == 'activation_52':\n break\n\n# getting the number of GPUs\ndef get_available_gpus():\n local_device_protos = device_lib.list_local_devices()\n return [x.name for x in local_device_protos if x.device_type == 'GPU']\n\n# getting the number of CPUs\ndef get_available_cpus():\n return multiprocessing.cpu_count()\n\n# getting the number of CPUs\ndef get_txt_length(txt_path):\n with open(txt_path) as f:\n names = f.read().splitlines()\n return len(names)\n\n# randomly create patch\ndef random_patch(image, mask,patch_size):\n h, w = image.shape[:2]\n max_size = max(h, w)\n min_size = max_size // 2\n\n sqr_img = np.zeros((max_size, max_size, 3), np.uint8)\n sqr_mask = np.zeros((max_size, max_size), np.float32)\n\n if h >= w:\n sqr_img[:, (h - w) // 2: (h - w) // 2 + w] = image\n sqr_mask[:, (h - w) // 2: (h - w) // 2 + w] = mask\n else:\n sqr_img[(w - h) // 2: (w - h) // 2 + h, :] = image\n sqr_mask[(w - h) // 2: (w - h) // 2 + h, :] = mask\n\n crop_size = random.randint(min_size, max_size) # both value 
are inclusive\n    \n    x = random.randint(0, max_size - crop_size) # 0 is inclusive\n    y = random.randint(0, max_size - crop_size)\n    image = sqr_img[y: y + crop_size, x: x + crop_size]\n    mask = sqr_mask[y: y + crop_size, x: x + crop_size]\n\n    image = cv2.resize(image, (patch_size[1], patch_size[0]), interpolation=cv2.INTER_LINEAR)\n    mask = cv2.resize(mask, (patch_size[1], patch_size[0]), interpolation=cv2.INTER_LINEAR)\n\n    return image,mask\n\ndef pad_patch(image, mask,patch_size):\n    h, w = image.shape[:2]\n    max_size = max(h, w)\n    min_size = max_size // 2\n\n    sqr_img = np.zeros((max_size, max_size, 3), np.uint8)\n    sqr_mask = np.zeros((max_size, max_size), np.float32)\n\n    if h >= w:\n        sqr_img[:, (h - w) // 2: (h - w) // 2 + w] = image\n        sqr_mask[:, (h - w) // 2: (h - w) // 2 + w] = mask\n    else:\n        sqr_img[(w - h) // 2: (w - h) // 2 + h, :] = image\n        sqr_mask[(w - h) // 2: (w - h) // 2 + h, :] = mask\n\n    image = cv2.resize(image, (patch_size[1], patch_size[0]), interpolation=cv2.INTER_CUBIC)\n    mask = cv2.resize(mask, (patch_size[1], patch_size[0]), interpolation=cv2.INTER_CUBIC)\n\n    return image,mask\n\n# Plot the training and validation loss + accuracy\ndef plot_training(history,pic_name='train_val_loss.png'):\n    plt.style.use(\"ggplot\")\n    plt.figure()\n    plt.plot(history.history['loss'], label=\"train_loss\")\n    plt.plot(history.history['val_loss'], label=\"val_loss\")\n    plt.plot(history.history['acc'],label=\"train_acc\")\n    plt.plot(history.history['val_acc'],label=\"val_acc\")\n    plt.title(\"Train/Val Loss and Train/Val Acc\")\n    plt.xlabel(\"Epoch\")\n    plt.ylabel(\"Loss/Acc\")\n    plt.legend(loc=\"upper left\")\n    plt.savefig(pic_name)\n\ndef random_rescale_image_and_mask(image,mask,min_scale = 0.5, max_scale = 2):\n    rows = image.shape[0]\n    cols = image.shape[1]\n    # print(\"image.shape:{}\".format(image.shape))\n    # print(\"mask.shape:{}\".format(mask.shape))\n    # print(\"rows:{},cols:{}\".format(rows,cols))\n    ratio = random.uniform(min_scale,max_scale)\n    # print(\"ratio:{}\".format(ratio))\n    new_rows = int(ratio*rows)\n    new_cols = int(ratio*cols)\n    # print(\"new_rows:{},new_cols:{}\".format(new_rows,new_cols))\n    image = cv2.resize(image, dsize=(new_cols, new_rows), interpolation=cv2.INTER_LINEAR)\n    mask = cv2.resize(mask, dsize=(new_cols, new_rows), interpolation=cv2.INTER_LINEAR)\n    # print(\"image.shape:{}\".format(image.shape))\n    # print(\"mask.shape:{}\".format(mask.shape))\n    return image,mask\n\ndef random_trimap(alpha):\n    mask = alpha.copy() # 0~255\n    # set every non-pure-background pixel to 255\n    mask = ((mask!=0)*255).astype(np.float32) # values are 0.0 and 255.0\n    #mask = ((mask==255)*255).astype(np.float32) # values are 0.0 and 255.0\n    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))\n    dilate = cv2.dilate(mask, kernel, iterations=np.random.randint(1, 5)) \n    erode = cv2.erode(mask, kernel, iterations=np.random.randint(1, 5)) \n    # 128/255/0\n    img_trimap = ((mask-erode)==255.0)*128 + ((dilate-mask)==255.0)*128 + erode\n    # add back the regions that were originally 128\n    bool_unkonw = (alpha!=255)*(alpha!=0)\n    img_trimap = img_trimap*(1-bool_unkonw)+bool_unkonw*128\n    return img_trimap.astype(np.uint8)\n\n# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions.\ndef random_choice(trimap, crop_size=(512, 512)):\n    crop_height, crop_width = crop_size\n    # np.where(arry): returns the coordinates of the 'true' (i.e. non-zero) entries of arry\n    # returns: (array([]),array([])) - the first array([]) holds the row coordinates, the second the column coordinates\n    y_indices, x_indices = np.where(trimap == 128)\n    # number of unknown pixels\n    num_unknowns = len(y_indices)\n    x, y = 0, 0\n    if num_unknowns > 0:\n        # pick the coordinates of one unknown pixel at random\n        ix = np.random.choice(range(num_unknowns))\n        center_x = 
x_indices[ix]\n        center_y = y_indices[ix]\n        # starting point for the crop below\n        x = max(0, center_x - int(crop_width / 2))\n        y = max(0, center_y - int(crop_height / 2))\n    return x, y\n\ndef safe_crop(mat, x, y, crop_size=(512, 512)):\n    # e.g. crop_height = 640, crop_width = 640\n    crop_height, crop_width = crop_size\n    # for alpha, first build an all-zero array of shape (crop_height, crop_width)\n    if len(mat.shape) == 2:\n        ret = np.zeros((crop_height, crop_width), np.float32)\n    # for fg, bg and image, first build an all-zero array of shape (crop_height, crop_width, 3)\n    else:\n        ret = np.zeros((crop_height, crop_width, 3), np.float32)\n    # note: this is why the function is named safe_crop!\n    # if (y+crop_height) goes beyond the bounds of mat, no error is raised - the slice simply stops at the border of mat\n    # so the size of crop is not necessarily (crop_height, crop_width); it may be smaller\n    crop = mat[y:y+crop_height, x:x+crop_width]\n    # get the actual size of crop\n    h, w = crop.shape[:2]\n    # copy the contents of crop into ret\n    # the rest of ret stays 0, of course\n    ret[0:h, 0:w] = crop\n    # # resize to (img_rows,img_cols), i.e. (320,320)\n    # if crop_size != (img_rows, img_cols):\n    #     # dsize refers to Size(width,height)\n    #     print(\"crop_size != (512,512)\")\n    #     ret = cv2.resize(ret, dsize=(img_rows, img_cols), interpolation=cv2.INTER_NEAREST)\n\n    return ret\n\ndef trimap_one_hot_encoding(trimap):\n    trimap[trimap == 0] = 0\n    trimap[trimap == 255] = 1\n    trimap[trimap == 128] = 2\n    trimap = to_categorical(trimap, 3) \n    return trimap\n\ndef make_mask_for_batch_y(mask):\n    for row in range(mask.shape[0]):\n        for col in range(mask.shape[1]):\n            # the background region is class 0\n            if(mask[row,col]==0):\n                mask[row,col]=0\n            # the foreground region is class 1\n            if(mask[row,col]==255):\n                mask[row,col]=1\n    mask = to_categorical(mask, 2) \n    return mask\n\ndef colorful(out):\n    img_rows, img_cols= out.shape[0],out.shape[1]\n    result_rgb = np.empty((img_rows,img_cols, 3), dtype=np.uint8)\n    for row in range(img_rows):\n        for col in range(img_cols):\n            # the background region is class 0\n            if(out[row,col]==0):\n                result_rgb[row,col,0]=0\n                result_rgb[row,col,1]=0\n                result_rgb[row,col,2]=0\n            # the foreground region is class 1\n            if(out[row,col]==1):\n                result_rgb[row,col,0]=255\n                result_rgb[row,col,1]=255\n                result_rgb[row,col,2]=255\n            # the 128 (unknown) region is class 2\n            if(out[row,col]==2): \n                result_rgb[row,col,0]=128\n                result_rgb[row,col,1]=128\n                result_rgb[row,col,2]=128\n    return result_rgb\n\ndef rotate_image(image, angle):\n    \"\"\"\n    Rotates an OpenCV 2 / NumPy image about its centre by the given angle\n    (in degrees). 
The returned image will be large enough to hold the entire\n    new image, with a black background\n    \"\"\"\n\n    # Get the image size\n    # No that's not an error - NumPy stores image matrices backwards\n    image_size = (image.shape[1], image.shape[0])\n    image_center = tuple(np.array(image_size) / 2)\n\n    # Convert the OpenCV 3x2 rotation matrix to 3x3\n    rot_mat = np.vstack(\n        [cv2.getRotationMatrix2D(image_center, angle, 1.0), [0, 0, 1]]\n    )\n\n    rot_mat_notranslate = np.matrix(rot_mat[0:2, 0:2])\n\n    # Shorthand for below calcs\n    image_w2 = image_size[0] * 0.5\n    image_h2 = image_size[1] * 0.5\n\n    # Obtain the rotated coordinates of the image corners\n    rotated_coords = [\n        (np.array([-image_w2, image_h2]) * rot_mat_notranslate).A[0],\n        (np.array([ image_w2, image_h2]) * rot_mat_notranslate).A[0],\n        (np.array([-image_w2, -image_h2]) * rot_mat_notranslate).A[0],\n        (np.array([ image_w2, -image_h2]) * rot_mat_notranslate).A[0]\n    ]\n\n    # Find the size of the new image\n    x_coords = [pt[0] for pt in rotated_coords]\n    x_pos = [x for x in x_coords if x > 0]\n    x_neg = [x for x in x_coords if x < 0]\n\n    y_coords = [pt[1] for pt in rotated_coords]\n    y_pos = [y for y in y_coords if y > 0]\n    y_neg = [y for y in y_coords if y < 0]\n\n    right_bound = max(x_pos)\n    left_bound = min(x_neg)\n    top_bound = max(y_pos)\n    bot_bound = min(y_neg)\n\n    new_w = int(abs(right_bound - left_bound))\n    new_h = int(abs(top_bound - bot_bound))\n\n    # We require a translation matrix to keep the image centred\n    trans_mat = np.matrix([\n        [1, 0, int(new_w * 0.5 - image_w2)],\n        [0, 1, int(new_h * 0.5 - image_h2)],\n        [0, 0, 1]\n    ])\n\n    # Compute the transform for the combined rotation and translation\n    affine_mat = (np.matrix(trans_mat) * np.matrix(rot_mat))[0:2, :]\n\n    # Apply the transform\n    result = cv2.warpAffine(\n        image,\n        affine_mat,\n        (new_w, new_h),\n        flags=cv2.INTER_LINEAR\n    )\n\n    return result\n\ndef largest_rotated_rect(w, h, angle):\n    \"\"\"\n    Given a rectangle of size wxh that has been rotated by 'angle' (in\n    radians), computes the width and height of the largest possible\n    axis-aligned rectangle within the rotated rectangle.\n\n    Original JS code by 'Andri' and Magnus Hoff from Stack Overflow\n\n    Converted to Python by Aaron Snoswell\n    \"\"\"\n\n    quadrant = int(math.floor(angle / (math.pi / 2))) & 3\n    sign_alpha = angle if ((quadrant & 1) == 0) else math.pi - angle\n    alpha = (sign_alpha % math.pi + math.pi) % math.pi\n\n    bb_w = w * math.cos(alpha) + h * math.sin(alpha)\n    bb_h = w * math.sin(alpha) + h * math.cos(alpha)\n\n    gamma = math.atan2(bb_w, bb_w) if (w < h) else math.atan2(bb_w, bb_w)\n\n    delta = math.pi - alpha - gamma\n\n    length = h if (w < h) else w\n\n    d = length * math.cos(alpha)\n    a = d * math.sin(alpha) / math.sin(delta)\n\n    y = a * math.cos(gamma)\n    x = y * math.tan(gamma)\n\n    return (\n        bb_w - 2 * x,\n        bb_h - 2 * y\n    )\n\ndef crop_around_center(image, width, height):\n    \"\"\"\n    Given a NumPy / OpenCV 2 image, crops it to the given width and height,\n    around its centre point\n    \"\"\"\n\n    image_size = (image.shape[1], image.shape[0])\n    image_center = (int(image_size[0] * 0.5), int(image_size[1] * 0.5))\n\n    if(width > image_size[0]):\n        width = image_size[0]\n\n    if(height > image_size[1]):\n        height = image_size[1]\n\n    x1 = int(image_center[0] - width * 0.5)\n    x2 = int(image_center[0] + width * 0.5)\n    y1 = int(image_center[1] - height * 0.5)\n    y2 = int(image_center[1] + height * 0.5)\n\n    return image[y1:y2, x1:x2]\n\ndef pad_and_resize_to_target_size(image, target_rows, target_cols,interpolation = 
cv2.INTER_LINEAR):\n    mat = np.zeros((target_rows, target_cols, 3), np.float32)\n    \n    rows,cols = image.shape[0],image.shape[1]\n    if rows>cols:\n        larger = rows\n        ratio = target_rows/larger\n    else:\n        larger = cols\n        ratio = target_cols/larger\n    new_rows = int(np.round(rows*ratio))\n    new_cols = int(np.round(cols*ratio))\n    # honor the interpolation argument instead of hard-coding cv2.INTER_LINEAR\n    image_new = cv2.resize(image,(new_cols,new_rows),interpolation = interpolation)\n\n    mat[0:new_rows, 0:new_cols,:] = image_new\n\n    return mat.astype(np.uint8)\n\ndef pad_and_resize_mask_to_target_size(image, target_rows, target_cols,interpolation = cv2.INTER_NEAREST):\n    mat = np.zeros((target_rows, target_cols, 3), np.float32)\n    \n    rows,cols = image.shape[0],image.shape[1]\n    if rows>cols:\n        larger = rows\n        ratio = target_rows/larger\n    else:\n        larger = cols\n        ratio = target_cols/larger\n    new_rows = int(np.round(rows*ratio))\n    new_cols = int(np.round(cols*ratio))\n    # honor the interpolation argument instead of hard-coding cv2.INTER_NEAREST\n    image_new = cv2.resize(image,(new_cols,new_rows),interpolation = interpolation)\n    \n    mat[0:new_rows, 0:new_cols,:] = image_new\n\n    return mat.astype(np.uint8)\n\ndef resize_to_target_size(image, target_rows, target_cols,interpolation = cv2.INTER_LINEAR):\n    rows,cols = image.shape[0],image.shape[1]\n    if rows>cols:\n        larger = rows\n        ratio = target_rows/larger\n    else:\n        larger = cols\n        ratio = target_cols/larger\n    new_rows = int(np.round(rows*ratio))//16*16\n    new_cols = int(np.round(cols*ratio))//16*16\n    # honor the interpolation argument instead of hard-coding cv2.INTER_LINEAR\n    image_new = cv2.resize(image,(new_cols,new_rows),interpolation = interpolation)\n    # image_new = cv2.resize(image,(target_rows,target_cols),interpolation = cv2.INTER_LINEAR)\n\n    return image_new\n\ndef vis_segmentation(image, seg_map, save_path_name = \"examples.png\"):\n    \"\"\"Visualizes input image, segmentation map and overlay view.\"\"\"\n    plt.figure(figsize=(15, 5))\n    grid_spec = gridspec.GridSpec(1, 3, width_ratios=[6, 6, 6])\n\n    plt.subplot(grid_spec[0])\n    plt.imshow(image)\n    plt.axis('off')\n    plt.title('input image')\n\n    plt.subplot(grid_spec[1])\n    seg_image = seg_map\n    plt.imshow(seg_image)\n    plt.axis('off')\n    plt.title('segmentation map')\n\n    plt.subplot(grid_spec[2])\n    plt.imshow(image)\n    plt.imshow(seg_image, alpha=0.7)\n    plt.axis('off')\n    plt.title('segmentation overlay')\n\n    # ax = plt.subplot(grid_spec[3])\n    # legend_elements = [Line2D([0], [0], color='black', lw=4, label='Background'),\n    #                    Line2D([0], [0], color='gray', lw=4, label='Unknow Area'),\n    #                    Line2D([0], [0], color='white', lw=4, label='Foreground')]\n    # ax.legend(handles=legend_elements,loc = \"center\")\n    # plt.axis('off')\n    # plt.show()\n\n    plt.savefig(save_path_name)\n    plt.close('all')\n    \nif __name__ == '__main__':\n    ### test generator_random_trimap()\n    # img_mask = cv.imread('trimap_test_out/supervisely4847.png',0) \n    # i = 0\n    # for i in list(range(10)):\n    #     generator_random_trimap(img_mask,i)\n\n    ### test random_rescale_image_and_mask()\n    mask = cv2.imread(\"/home/datalab/ex_disk1/bulang/segmentation_models/checkpoint/FPN_res50_20190430/vis_train_pair_1/total_1_4112_trimap.png\",0)\n    print(mask.shape)\n    trimap = random_trimap(mask)\n    print(trimap.shape)\n    cv2.imwrite(\"/home/datalab/ex_disk1/bulang/segmentation_models/checkpoint/FPN_res50_20190430/vis_train_pair_1/total_4112_result_trimap.png\",trimap)\n\n\n\n\n", "sub_path": "utils/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 16999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.use", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", 
"line_number": 19, "usage_type": "call"}, {"api_name": "os.path", "line_number": 19, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.load", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib.list_local_devices", "line_number": 53, "usage_type": "call"}, {"api_name": "tensorflow.python.client.device_lib", "line_number": 53, "usage_type": "name"}, {"api_name": "multiprocessing.cpu_count", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 73, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 82, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 84, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 89, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 89, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 90, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 90, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 99, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 100, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 109, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 109, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 110, "usage_type": "call"}, {"api_name": "cv2.INTER_CUBIC", "line_number": 110, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.style.use", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.style", "line_number": 116, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.ylabel", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 125, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 125, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "random.uniform", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 139, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 139, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 140, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 140, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 148, "usage_type": "attribute"}, {"api_name": "cv2.getStructuringElement", "line_number": 150, "usage_type": "call"}, {"api_name": "cv2.MORPH_ELLIPSE", "line_number": 150, "usage_type": "attribute"}, {"api_name": "cv2.dilate", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 151, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 151, "usage_type": "attribute"}, {"api_name": "cv2.erode", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 152, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 158, "usage_type": "attribute"}, {"api_name": "numpy.where", "line_number": 165, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 184, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 184, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 187, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 187, "usage_type": "attribute"}, {"api_name": "keras.utils.to_categorical", "line_number": 209, "usage_type": "call"}, {"api_name": "keras.utils.to_categorical", "line_number": 221, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 226, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 256, "usage_type": "call"}, {"api_name": "numpy.vstack", "line_number": 259, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 263, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 273, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 274, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.matrix", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 305, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 309, "usage_type": "attribute"}, {"api_name": "math.floor", "line_number": 325, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 325, "usage_type": "attribute"}, {"api_name": "math.pi", "line_number": 326, "usage_type": 
"attribute"}, {"api_name": "math.pi", "line_number": 327, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 329, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 329, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 330, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 330, "usage_type": "call"}, {"api_name": "math.atan2", "line_number": 332, "usage_type": "call"}, {"api_name": "math.pi", "line_number": 334, "usage_type": "attribute"}, {"api_name": "math.cos", "line_number": 338, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 339, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 341, "usage_type": "call"}, {"api_name": "math.tan", "line_number": 342, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 371, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 372, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 372, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 381, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 382, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 383, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 383, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 387, "usage_type": "attribute"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 389, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 390, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 390, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 399, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 400, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 401, "usage_type": "call"}, {"api_name": "cv2.INTER_NEAREST", "line_number": 401, "usage_type": "attribute"}, {"api_name": "numpy.uint8", "line_number": 405, "usage_type": "attribute"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 407, "usage_type": "attribute"}, {"api_name": "numpy.round", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 416, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 417, "usage_type": "call"}, {"api_name": "cv2.INTER_LINEAR", "line_number": 417, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 424, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 424, "usage_type": "name"}, {"api_name": "matplotlib.gridspec.GridSpec", "line_number": 425, "usage_type": "call"}, {"api_name": "matplotlib.gridspec", "line_number": 425, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 427, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 427, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 428, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 428, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 429, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 429, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 430, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 430, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 432, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 432, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 434, "usage_type": 
"call"}, {"api_name": "matplotlib.pyplot", "line_number": 434, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 435, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 435, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 436, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 436, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 438, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 438, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 439, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 439, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 440, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 440, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 441, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 441, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 442, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 442, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 452, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 452, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 453, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 453, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 463, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 467, "usage_type": "call"}]} +{"seq_id": "527069420", "text": "# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport sys\nimport time\nimport os\nimport itertools\n\n\nimport scipy.misc as misc\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as plt\n\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import roc_curve, auc\nfrom sklearn.preprocessing import label_binarize\nfrom scipy import interp\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n Normalization can be applied by setting `normalize=True`.\n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n 
thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, format(cm[i, j], fmt),\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    \ndef plot_ROC(y_list,yhat_list):\n    y = label_binarize(np.array(y_list), classes=[0,1])\n    n_classes = y.shape[1]\n    y_test = np.array(y) \n    y_score = np.array(yhat_list) \n    fpr = dict()\n    tpr = dict()\n    roc_auc = dict()\n    for i in range(n_classes):\n        fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])\n        roc_auc[i] = auc(fpr[i], tpr[i])\n    \n    print(roc_auc)\n    \n    # Compute micro-average ROC curve and ROC area (used by the plot below)\n    fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n    roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n    \n    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n    # Then interpolate all ROC curves at these points\n    mean_tpr = np.zeros_like(all_fpr)\n    for i in range(n_classes):\n        mean_tpr += interp(all_fpr, fpr[i], tpr[i])\n    \n    # Finally average it and compute AUC\n    mean_tpr /= n_classes\n    \n    fpr[\"macro\"] = all_fpr\n    tpr[\"macro\"] = mean_tpr\n    roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n    \n    # Plot all ROC curves\n    lw = 2\n    plt.figure()\n    plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n             label='micro-average ROC curve (area = {0:0.2f})'\n                   ''.format(roc_auc[\"micro\"]),\n             color='deeppink', linestyle=':', linewidth=4)\n    \n    plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n             label='macro-average ROC curve (area = {0:0.2f})'\n                   ''.format(roc_auc[\"macro\"]),\n             color='navy', linestyle=':', linewidth=4)\n    \n    colors = itertools.cycle(['aqua', 'darkorange'])  # one matplotlib color per class (NG, OK)\n    for i, color in zip(range(n_classes), colors):\n        plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n                 label='ROC curve of class {0} (area = {1:0.2f})'\n                       ''.format(i, roc_auc[i]))\n    \n    plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n    plt.xlim([0.0, 1.0])\n    plt.ylim([0.0, 1.05])\n    plt.xlabel('False Positive Rate')\n    plt.ylabel('True Positive Rate')\n    plt.title('Some extension of Receiver operating characteristic to multi-class')\n    plt.legend(loc=\"lower right\")\n    plt.show()\n    \n    \n\ndef load_graph(model_file):\n  graph = tf.Graph()\n  graph_def = tf.GraphDef()\n\n  with open(model_file, \"rb\") as f:\n    graph_def.ParseFromString(f.read())\n  with graph.as_default():\n    tf.import_graph_def(graph_def)\n\n  return graph\n\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\n\t\t\t\tinput_mean=0, input_std=255):\n  input_name = \"file_reader\"\n  output_name = \"normalized\"\n  \n  img = misc.imread(file_name,mode='RGB')\n  img_height,img_width,img_channels = img.shape \n  img = misc.imresize(img,(input_height,input_width,img_channels),interp='bilinear',mode='RGB')\n  img = np.expand_dims(img,0)\n  img = (img-input_mean)/input_std\n  return img \n\ndef load_labels(label_file):\n  label = []\n  proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\n  for l in proto_as_ascii_lines:\n    label.append(l.rstrip())\n  return label\n\nif __name__ == \"__main__\":\n#   file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\n  model_file = \"tf_files/retrained_graph.pb\"\n  label_file = \"tf_files/retrained_labels.txt\"\n  input_height = 224\n  input_width = 224\n  input_mean = 128\n  input_std = 128\n  input_layer = \"input\"\n  output_layer = \"final_result\"\n  \n\n  parser = argparse.ArgumentParser()\n  parser.add_argument(\"--imagePath\", help=\"image to be processed\")\n  parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\n  parser.add_argument(\"--labels\", help=\"name of file containing labels\")\n#   parser.add_argument(\"--input_height\", type=int, help=\"input height\")\n# 
parser.add_argument(\"--input_width\", type=int, help=\"input width\")\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\n args = parser.parse_args()\n\n if args.graph:\n model_file = args.graph\n if args.imagePath:\n path = args.imagePath\n if args.labels:\n label_file = args.labels\n# if args.input_height:\n# input_height = args.input_height\n# if args.input_width:\n# input_width = args.input_width\n if args.input_mean:\n input_mean = args.input_mean\n if args.input_std:\n input_std = args.input_std\n if args.input_layer:\n input_layer = args.input_layer\n if args.output_layer:\n output_layer = args.output_layer\n \n graph = load_graph(model_file)\n\n\n input_name = \"import/\" + input_layer\n output_name = \"import/\" + output_layer\n keepprob = \"import/input_1/Keep_prob\"\n input_operation = graph.get_operation_by_name(input_name)\n output_operation = graph.get_operation_by_name(output_name) \n keep_prob = graph.get_operation_by_name(keepprob)\n \n labelList = []\n yList = []\n predictList = []\n y_score = []\n with tf.Session(graph=graph) as sess:\n for i in os.listdir(path): \n if 'NG' in i:\n labelList.append(0)\n yList.append([1,0])\n elif 'OK' in i:\n labelList.append(1)\n yList.append([0,1])\n file_name = os.path.join(path,i)\n t = read_tensor_from_image_file(file_name,\n input_height=input_height,\n input_width=input_width,\n input_mean=input_mean,\n input_std=input_std)\n start = time.time()\n results = sess.run(output_operation.outputs[0],{input_operation.outputs[0]: t,keep_prob.outputs[0]:1.0})\n end=time.time()\n results = np.squeeze(results)\n \n y_score.append(list(results))\n \n top_k = results.argsort()[-5:][::-1] \n predictList.append(top_k[0]) \n labels = load_labels(label_file)\n \n \n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\n print('Imagename = {}'.format(i))\n for j in top_k: \n print(labels[j], results[j])\n \n \n \n cnf_matrix = confusion_matrix(np.array(predictList), np.array(labelList))\n \n \n \n\n # Compute micro-average ROC curve and ROC area\n# fpr[\"micro\"], tpr[\"micro\"], _ = roc_curve(y_test.ravel(), y_score.ravel())\n# roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n \n plt.figure(1)\n plot_confusion_matrix(cnf_matrix, classes=['NG','OK'],\n title='Confusion matrix, without normalization')\n \n \n# plt.figure(2)\n# lw = 2\n# plt.plot(fpr[0], tpr[0], color='darkorange',\n# lw=lw, label='ROC curve (area = %0.2f)' % roc_auc[0])\n# plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n# plt.xlim([0.0, 1.0])\n# plt.ylim([0.0, 1.05])\n# plt.xlabel('False Positive Rate')\n# plt.ylabel('True Positive Rate')\n# plt.title('Receiver operating characteristic example')\n# plt.legend(loc=\"lower right\") \n plt.show()", "sub_path": "scripts/inferenceConfusionMatrix.py", "file_name": "inferenceConfusionMatrix.py", "file_ext": "py", "file_size_in_byte": 9330, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.pyplot.cm", "line_number": 41, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}, {"api_name": "numpy.newaxis", "line_number": 47, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 53, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 53, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "itertools.product", "line_number": 62, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.text", "line_number": 63, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 63, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "sklearn.preprocessing.label_binarize", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 71, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 74, "usage_type": "call"}, {"api_name": "sklearn.metrics.roc_curve", "line_number": 79, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.interp", "line_number": 89, "usage_type": "call"}, {"api_name": "sklearn.metrics.auc", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 99, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 99, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 105, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 105, "usage_type": "name"}, {"api_name": "itertools.cycle", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 112, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 112, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.xlabel", "line_number": 119, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 119, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 120, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "tensorflow.Graph", "line_number": 128, "usage_type": "call"}, {"api_name": "tensorflow.GraphDef", "line_number": 129, "usage_type": "call"}, {"api_name": "tensorflow.import_graph_def", "line_number": 134, "usage_type": "call"}, {"api_name": "scipy.misc.imread", "line_number": 143, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 143, "usage_type": "name"}, {"api_name": "scipy.misc.imresize", "line_number": 145, "usage_type": "call"}, {"api_name": "scipy.misc", "line_number": 145, "usage_type": "name"}, {"api_name": "numpy.expand_dims", "line_number": 146, "usage_type": "call"}, {"api_name": "tensorflow.gfile.GFile", "line_number": 152, "usage_type": "call"}, {"api_name": "tensorflow.gfile", "line_number": 152, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 169, "usage_type": "call"}, {"api_name": "tensorflow.Session", "line_number": 214, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 222, "usage_type": "call"}, {"api_name": "os.path", "line_number": 222, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 230, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 231, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 247, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 256, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 256, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 272, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 272, "usage_type": "name"}]} +{"seq_id": "59310974", "text": "import csv\nimport requests\nfrom pathlib import Path\nimport shutil\n\nimport click\nfrom github import Github\nfrom git import Repo\nfrom tqdm import tqdm\n\nfrom openpecha.serializers import *\nfrom openpecha.blupdate import Blupdate\nfrom openpecha.formatters import *\n\n\nOP_PATH = Path('./.openpecha')\nconfig = {\n # Github\n 'OP_CATALOG_URL': 'https://raw.githubusercontent.com/OpenPoti/openpecha-catalog/master/data/catalog.csv',\n 'OP_ORG': 'https://github.com/OpenPecha',\n\n # Local\n 'OP_DATA_PATH': (OP_PATH/'data').resolve(),\n 'OP_CATALOG_PATH': (OP_PATH/'data'/'catalog.csv').resolve(),\n 'CONFIG_PATH': (OP_PATH/'config').resolve(),\n 'DATA_CONFIG_PATH': (OP_PATH/'config'/'data_config').resolve(),\n}\n\nERROR = '[ERROR] {}'\nINFO = '[INFO] {}'\n\ndef get_pecha_id(n):\n return f'P{int(n):06}'\n\n@click.group()\ndef cli():\n pass\n\n\ndef create_config_dirs():\n 
config['OP_DATA_PATH'].mkdir(parents=True, exist_ok=True)\n    config['CONFIG_PATH'].mkdir(parents=True, exist_ok=True)\n\n\ndef get_pecha(id, batch_path, layers):\n\n    def _check_pecha(id=None, pechas=None, layer=None, pecha_list=None):\n        if id not in pecha_list:\n            if id in pechas:\n                if layer:\n                    if layer in pechas[id][-1].split('_'):\n                        pecha_list.append(id)\n                    else:\n                        msg = f'{layer} layer is not found in {id}'\n                        click.echo(ERROR.format(msg))\n                else:\n                    pecha_list.append(id)\n            else:\n                msg = f'{id} not found in OpenPecha catalog'\n                click.echo(ERROR.format(msg))\n\n    def _get_batch(batch_path):\n        with Path(batch_path).open() as f:\n            batch_ids = [line.strip() for line in f.readlines()]\n        return batch_ids\n\n\n    pecha_list = []\n\n    # If filter by layers\n    if layers:\n        layers_name = [l.strip() for l in layers.split(',')]\n        for layer in layers_name:\n            batch_ids = None\n            if id:\n                _check_pecha(id=id, pechas=pechas, layer=layer, pecha_list=pecha_list)\n            elif batch_path:\n                if not batch_ids: batch_ids = _get_batch(batch_path)\n                for b_id in batch_ids:\n                    _check_pecha(id=b_id, pechas=pechas, layer=layer, pecha_list=pecha_list)\n            else:\n                for p_id in pechas:\n                    _check_pecha(id=p_id, pechas=pechas, layer=layer, pecha_list=pecha_list)\n    else:\n        if id:\n            _check_pecha(id=id, pechas=pechas, pecha_list=pecha_list)\n        elif batch_path:\n            batch_ids = _get_batch(batch_path)\n            for b_id in batch_ids:\n                _check_pecha(id=b_id, pechas=pechas, pecha_list=pecha_list)\n        else:\n            for p_id in pechas:\n                _check_pecha(id=p_id, pechas=pechas, pecha_list=pecha_list)\n\n    return pecha_list\n\n\ndef download_pecha(pecha, out):\n    # clone the repo\n    pecha_url = f\"{config['OP_ORG']}/{pecha}.git\"\n    pecha_path = config['OP_DATA_PATH']/pecha\n    if pecha_path.is_dir(): # if the repo already exists locally then try to pull\n        repo = Repo(str(pecha_path))\n        repo.heads['master'].checkout()\n        repo.remotes.origin.pull()\n    else:\n        Repo.clone_from(pecha_url, str(pecha_path))\n\n\n# Poti Download command\n@cli.command()\n@click.option('--number', '-n', help='Pecha number, for single pecha download')\n@click.option('--batch', '-b', help=\"path to text file containing list of names of \\\n                              pecha, one per line. Poti batch download\")\n@click.option('--filter', '-f', help='filter pecha by layer availability, specify \\\n                              layer names comma separated, eg: title,yigchung,..')\n@click.option('--out', '-o', default='./pecha',\n                              help='directory to store all the pecha')\ndef download(**kwargs):\n    '''\n    Command to download pecha.\n    If id and batch options are not provided then it will download all the pecha.\n    '''\n    pecha_id = get_pecha_id(kwargs['number'])\n\n    # create config dirs\n    create_config_dirs()\n\n    # configure the data_path\n    config['data'] = Path(kwargs['out']).resolve()\n\n    # get pecha\n    # pechas = get_pecha(work_id, kwargs['batch'], kwargs['filter'])\n    pechas = [pecha_id]\n\n    # download the repo\n    for pecha in tqdm(pechas):\n        download_pecha(pecha, kwargs['out'])\n\n    # save data_path in data_config\n    config_path = config['DATA_CONFIG_PATH']\n    if not config_path.is_file():\n        config_path.write_text(str(config['data'].resolve()))\n\n    # print location of data\n    msg = f'Downloaded {pecha_id} ... 
ok'\n    click.echo(INFO.format(msg))\n\n\n# Apply layer command\nlayers_name = ['title', 'tsawa', 'yigchung', 'quotes', 'sapche']\n\n@cli.command()\n@click.option('--name', '-n', type=click.Choice(layers_name), \\\n                              help='name of a layer to be applied')\n@click.option('--list', '-l', help='list of names of layers to be applied, \\\n                              names of layers should be comma separated')\n@click.argument('work_number')\n@click.argument('out', type=click.File('w'))\ndef layer(**kwargs):\n    \"\"\"\n    Command to apply a single layer, multiple layers or all available layers (by default) and then export to markdown.\\n\n    Args:\\n\n        - WORK_NUMBER is the work number of the pecha to which the given layer will be applied\\n\n        - OUT is the filename to write the result to. Currently supports only Markdown files.\n    \"\"\"\n    work_id = get_pecha_id(kwargs['work_number'])\n    opfpath = config[\"OP_DATA_PATH\"]/work_id/f'{work_id}.opf'\n    serializer = SerializeMd(opfpath)\n    if kwargs['name']:\n        serializer.apply_layer(kwargs['name'])\n    elif kwargs['list']:\n        layers = kwargs['list'].split(',')\n        serializer.layers = layers\n        serializer.apply_layers()\n    else:\n        serializer.apply_layers()\n\n    result = serializer.get_result()\n    click.echo(result, file=kwargs['out'])\n\n    # logging\n    msg = f'Output is saved at: {kwargs[\"out\"].name}'\n    click.echo(INFO.format(msg))\n\n\ndef pecha_list():\n    return [pecha.name for pecha in config['OP_DATA_PATH'].iterdir()]\n\ndef get_data_path():\n    return Path(config['DATA_CONFIG_PATH'].read_text().strip())\n\n\ndef check_edits(w_id):\n    edit_path = get_data_path()\n    data_path = config['OP_DATA_PATH']\n\n    srcbl = (data_path/f'{w_id}'/f'{w_id}.opf'/'base.txt').read_text()\n    dstbl = (edit_path/f'{w_id}.txt').read_text()\n\n    return srcbl != dstbl, srcbl, dstbl\n\n\ndef setup_credential(repo):\n    # setup authentication, if not done\n    if not (config['CONFIG_PATH']/'credential').is_file():\n        username = click.prompt('Github Username')\n        password = click.prompt('Github Password', hide_input=True)\n        # save credential\n        (config['CONFIG_PATH']/'credential').write_text(f'{username},{password}')\n\n    if '@' not in repo.remotes.origin.url:\n        # get user credentials\n        credential = (config['CONFIG_PATH']/'credential').read_text()\n        username, password = [s.strip() for s in credential.split(',')]\n        \n        old_url = repo.remotes.origin.url.split('//')\n        repo.remotes.origin.set_url(\n            f'{old_url[0]}//{username}:{password}@{old_url[1]}'\n        )\n    \n    return repo\n\n\ndef github_push(repo, branch_name, msg='made edits'):\n    # credential\n    repo = setup_credential(repo)\n\n    # checkout to edited branch\n    if branch_name in repo.heads:\n        current = repo.heads[branch_name]\n    else:\n        current = repo.create_head(branch_name)\n    current.checkout()\n\n    # Add, commit and push the edited branch\n    if repo.is_dirty():\n        repo.git.add(A=True)\n        repo.git.commit(m=msg)\n        try: \n            repo.git.push('--set-upstream', 'origin', current)\n        except Exception as e:\n            print(e)\n            msg = 'Authentication failed: Try again later'\n            click.echo(ERROR.format(msg))\n            return False\n\n    # finally checkout to master to apply layers on the validated text\n    repo.heads['master'].checkout()\n    \n    return True\n\n\ndef repo_reset(repo, branch_name):\n    # remove edited branch\n    repo.heads['master'].checkout()\n    repo.delete_head(repo.heads[branch_name], force=True)\n\n    # reset to the origin url\n    url = repo.remotes.origin.url.split('@')\n    protocol = url[0].split('//')[0]\n    repo.remotes.origin.set_url(\n        f'{protocol}//{url[1]}'\n    )\n\n\n# Update annotations 
command\n@cli.command()\n@click.argument('work_number')\ndef update(**kwargs):\n    \"\"\"\n    Command to update the base text with your edits.\n    \"\"\"\n    work_id = get_pecha_id(kwargs['work_number'])\n    if work_id:\n        if work_id in pecha_list():\n            repo_path = config[\"OP_DATA_PATH\"]/work_id\n            repo = Repo(str(repo_path))\n\n            # if the edited branch exists, check it out to look for changes\n            branch_name = 'edited'\n            if branch_name in repo.heads:\n                current = repo.heads[branch_name]\n                current.checkout()\n\n            is_changed, srcbl, dstbl = check_edits(work_id)\n            if is_changed:\n                msg = f'Updating {work_id} base text.'\n                click.echo(INFO.format(msg))\n\n                # Update layer annotations\n                updater = Blupdate(srcbl, dstbl)\n                opfpath = repo_path/f'{work_id}.opf'\n                updater.update_annotations(opfpath)\n\n                # Update base-text\n                src = get_data_path()/f'{work_id}.txt'\n                dst = opfpath/'base.txt'\n                shutil.copy(str(src), str(dst))\n\n                # Create edited branch and push to Github\n                status = github_push(repo, branch_name)\n\n                # logging\n                if status:\n                    msg = f'Pecha edits {work_id} are uploaded for further validation'\n                    click.echo(INFO.format(msg))\n                else:\n                    repo_reset(repo, branch_name)\n            else:\n                msg = f'There are no changes in Pecha {work_id}'\n                click.echo(ERROR.format(msg))\n        else:\n            msg = f'{work_id} does not exist, check the work-id'\n            click.echo(ERROR.format(msg))\n\n\n# OpenPecha Formatter\nformatter_types = ['ocr', 'hfml(default)', 'tsadra']\n\n@cli.command()\n@click.option('--name', '-n', type=click.Choice(formatter_types),\n              help='Type of formatter')\n@click.option('--id', '-i', type=int,\n              help='Id of the pecha')\n@click.argument('input_path')\ndef format(**kwargs):\n    '''\n    Command to format pecha into opf\n    '''\n    formatter = HFMLFormatter()\n    if kwargs['name'] == 'ocr':\n        formatter = GoogleOCRFormatter()\n    elif kwargs['name'] == 'tsadra':\n        formatter = HFMLFormatter()\n\n    formatter.create_opf(kwargs['input_path'], kwargs['id'])\n\n\n@cli.command()\n@click.option('--text_id', '-ti', type=str, help='text id of text')\n@click.option('--vol_number', '-vn', type=int, help='vol number')\n@click.argument('pecha_num')\ndef edit(**kwargs):\n    '''\n    Command to export Pecha for editing work\n    '''\n    pecha_id = get_pecha_id(kwargs['pecha_num'])\n    opf_path = f'{config[\"OP_DATA_PATH\"]}/{pecha_id}/{pecha_id}.opf'\n\n    if kwargs['text_id']:\n        serializer = SerializeHFML(opf_path, text_id=kwargs['text_id'])\n        out_fn = f'{pecha_id}-{kwargs[\"text_id\"]}.txt'\n    elif kwargs['vol_number']:\n        vol_id = f'v{kwargs[\"vol_number\"]:03}'\n        serializer = SerializeHFML(opf_path, vol_id=vol_id)\n        out_fn = f'{pecha_id}-{vol_id}.txt'\n    else:\n        serializer = SerializeHFML(opf_path)\n        out_fn = f'{pecha_id}-v001.txt'\n\n    serializer.apply_layers()\n    \n    result = serializer.get_result()\n    click.echo(result, file=open(out_fn, 'w'))\n\n    # logging\n    msg = f'Output is saved at: {out_fn}'\n    click.echo(INFO.format(msg))", "sub_path": "openpecha/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 11948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pathlib.Path", "line_number": 16, "usage_type": "call"}, {"api_name": "click.group", "line_number": 35, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 55, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 60, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 63, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 103, "usage_type": "call"}, {"api_name": 
"git.Repo.clone_from", "line_number": 107, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 107, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 130, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 137, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 147, "usage_type": "call"}, {"api_name": "click.option", "line_number": 112, "usage_type": "call"}, {"api_name": "click.option", "line_number": 113, "usage_type": "call"}, {"api_name": "click.option", "line_number": 115, "usage_type": "call"}, {"api_name": "click.option", "line_number": 117, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 180, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 184, "usage_type": "call"}, {"api_name": "click.option", "line_number": 154, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 154, "usage_type": "call"}, {"api_name": "click.option", "line_number": 156, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 158, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 159, "usage_type": "call"}, {"api_name": "click.File", "line_number": 159, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 191, "usage_type": "call"}, {"api_name": "click.prompt", "line_number": 207, "usage_type": "call"}, {"api_name": "click.prompt", "line_number": 208, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 245, "usage_type": "call"}, {"api_name": "git.Repo", "line_number": 278, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 289, "usage_type": "call"}, {"api_name": "openpecha.blupdate.Blupdate", "line_number": 292, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 299, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 307, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 312, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 315, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 269, "usage_type": "call"}, {"api_name": "click.option", "line_number": 322, "usage_type": "call"}, {"api_name": "click.Choice", "line_number": 322, "usage_type": "call"}, {"api_name": "click.option", "line_number": 324, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 326, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 365, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 369, "usage_type": "call"}, {"api_name": "click.option", "line_number": 341, "usage_type": "call"}, {"api_name": "click.option", "line_number": 342, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 343, "usage_type": "call"}]} +{"seq_id": "86236566", "text": "from utils.json_response import json_response\nfrom exceptions.check_violation_error import CheckViolationError\nfrom exceptions.invalid_login_error import InvalidLoginException\nfrom exceptions.invalid_data_type import InvalidDataTypeException\nfrom exceptions.reimbursement_not_found_error import ReimbursementNotFoundException\nfrom exceptions.unauthorized_action_error import UnauthorizedActionException\nfrom exceptions.worker_not_found_error import WorkerNotFoundException\n\n\n# handle possible exceptions in this app\ndef handle_exceptions(route_handler):\n def handler(*args, **kwargs):\n try:\n return route_handler(*args, **kwargs)\n except CheckViolationError as e:\n return json_response(e.summary, e.code), e.code\n except InvalidDataTypeException as e:\n return 
json_response(e.summary, e.code), e.code\n except InvalidLoginException as e:\n return json_response(e.summary, e.code), e.code\n except ReimbursementNotFoundException as e:\n return json_response(e.summary, e.code), e.code\n except UnauthorizedActionException as e:\n return json_response(e.summary, e.code), e.code\n except WorkerNotFoundException as e:\n return json_response(e.summary, e.code), e.code\n except TypeError as e:\n return json_response(str(e), 400), 400\n except ValueError as e:\n return json_response(str(e), 404), 404\n except LookupError as e:\n return json_response(str(e), 400), 400\n except Exception as e:\n return json_response(str(e), 500), 500\n handler.__name__ = route_handler.__name__\n return handler\n", "sub_path": "server/utils/handle_exceptions.py", "file_name": "handle_exceptions.py", "file_ext": "py", "file_size_in_byte": 1683, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "exceptions.check_violation_error.CheckViolationError", "line_number": 15, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 16, "usage_type": "call"}, {"api_name": "exceptions.invalid_data_type.InvalidDataTypeException", "line_number": 17, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 18, "usage_type": "call"}, {"api_name": "exceptions.invalid_login_error.InvalidLoginException", "line_number": 19, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 20, "usage_type": "call"}, {"api_name": "exceptions.reimbursement_not_found_error.ReimbursementNotFoundException", "line_number": 21, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 22, "usage_type": "call"}, {"api_name": "exceptions.unauthorized_action_error.UnauthorizedActionException", "line_number": 23, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 24, "usage_type": "call"}, {"api_name": "exceptions.worker_not_found_error.WorkerNotFoundException", "line_number": 25, "usage_type": "name"}, {"api_name": "utils.json_response.json_response", "line_number": 26, "usage_type": "call"}, {"api_name": "utils.json_response.json_response", "line_number": 28, "usage_type": "call"}, {"api_name": "utils.json_response.json_response", "line_number": 30, "usage_type": "call"}, {"api_name": "utils.json_response.json_response", "line_number": 32, "usage_type": "call"}, {"api_name": "utils.json_response.json_response", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "220451734", "text": "import matplotlib.pyplot as plt\r\nimport matplotlib.animation\r\n\r\nplt.close()\r\n\r\n\r\n#function that gets the second to last line\r\ndef regelKrijgen():\r\n with open('temperatuurDruk.txt', 'r') as f:\r\n lines = f.read().splitlines()\r\n regel = lines[-2]\r\n return regel\r\n\r\n#function that splits the line read into the different variables\r\ndef waarde(regel,tijds =[],temps = [],druks =[],hoogtes=[]):\r\n arraytje = (str(regel)).split(\"X\")\r\n if len(arraytje) == 6:\r\n time = float(arraytje[1]) / 1000\r\n temperatuur = arraytje[2]\r\n druk = arraytje[3]\r\n hoogte = arraytje[4]\r\n return float(time),float(temperatuur),float(druk),float(hoogte)\r\n else:\r\n print(\"crisis man\")\r\n return tijds[-1],temps[-1],druks[-1],hoogtes[-1]\r\n\r\n\r\nfig, ax = plt.subplots(3)\r\n\r\n\r\ntijds = []\r\ndruks = []\r\ntemps = []\r\nhoogtes = []\r\n\r\n\r\n#animation function for both subplots\r\ndef 
animate(i):\r\n    if tijds:\r\n        tijd,temperatuur,druk,hoogte = waarde(regelKrijgen(),tijds,temps,druks,hoogtes)\r\n    else:\r\n        tijd,temperatuur,druk,hoogte = waarde(regelKrijgen())\r\n    \r\n    tijds.append(tijd)\r\n    temps.append(temperatuur)\r\n    druks.append(druk)\r\n    hoogtes.append(hoogte)\r\n    \r\n    ax[0].clear()\r\n    ax[0].plot(tijds, temps,label = 'temperatuur', color = 'r')\r\n    ax[0].set_xlabel('tijd')\r\n    ax[0].set_ylabel('temperatuur')\r\n    ax[0].grid()\r\n    ax[1].clear()\r\n    ax[1].plot(tijds, druks,label = 'druk', color = 'hotpink')\r\n    ax[1].set_xlabel('tijd')\r\n    ax[1].set_ylabel('druk')\r\n    ax[1].grid()\r\n    ax[2].clear()\r\n    ax[2].plot(tijds, hoogtes,label = 'hoogte',color = 'b')\r\n    ax[2].set_xlabel('tijd')\r\n    ax[2].set_ylabel('hoogte in meters')\r\n    ax[2].grid()\r\n    \r\n\r\n\r\n#showing animation\r\nani = matplotlib.animation.FuncAnimation(fig, animate)\r\n\r\nplt.show()\r\n\r\n\r\n", "sub_path": "python/Live plotten met coolTermWin.py", "file_name": "Live plotten met coolTermWin.py", "file_ext": "py", "file_size_in_byte": 1829, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.pyplot.close", "line_number": 4, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 4, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.animation.FuncAnimation", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.animation", "line_number": 68, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}]} +{"seq_id": "437621909", "text": "import datetime\nfrom collections import namedtuple\nimport urllib.parse\nimport bs4\nimport requests\nimport dateparser\n\nInnerBlock = namedtuple('Block', 'title, price, currency, date, url')\n\n\nclass Block(InnerBlock):\n\n    def __str__(self):\n        return f'{self.title}\\t{self.price} {self.currency}\\t{self.date}\\t{self.url}'\n\n\nclass AParcer:\n\n    def __init__(self):\n        self.session = requests.Session()\n        self.session.headers = {\n            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n        }\n\n    def get_page(self, page: int = None):\n        params = {\n            'cd': 1,\n            'q': 'x5',\n            'radius': 0,\n        }\n        if page and page > 1:\n            params['p'] = page\n\n        url = 'https://www.avito.ru/moskva/avtomobili/bmw/x5-ASgBAgICAkTgtg3klyjitg22tCg'\n        r = self.session.get(url, params=params)\n        return r.text\n\n\n    def parse_block(self, item):\n        url_block = item.select_one('a.link-link-39EVK.link-design-default-2sPEv.title-root-395AQ.iva-item-title-1Rmmj.title-list-1IIB_.title-root_maxHeight-3obWc')\n\n        href = url_block.get('href')\n        if href:\n            url = 'https://www.avito.ru' + href\n        else:\n            url = None\n\n        # select the block with the title\n        title_block = item.select_one('div.iva-item-titleStep-2bjuh span')\n\n        title = title_block.string.strip()\n\n        # select the block with the price and currency\n        price_block = item.select_one('span.price-text-1HrJ_.text-text-1PdBw.text-size-s-1PUdo')\n        price_block = price_block.get_text('\\n')\n        price_block = list(filter(None, map(lambda i: i.strip(), price_block.split('\\n'))))\n        if len(price_block) == 2:\n            price, currency = 
price_block\n        else:\n            price, currency = None, None\n            print(\"Something went wrong with the price:\", price_block)\n\n        date_block = item.select_one('div.date-text-2jSvU.text-text-1PdBw.text-size-s-1PUdo.text-color-noaccent-bzEdI')\n        a = date_block.get_text('\\n')\n        # absolute_date = date_block.get('data-absolute-date')\n        absolute_date = dateparser.parse(a, date_formats=['%H:%M'])\n\n        return Block(\n            url=url,\n            title=title,\n            price=price,\n            currency=currency,\n            date=absolute_date,\n        )\n\n    def get_pagination_limit(self):\n        text = self.get_page()\n        soup = bs4.BeautifulSoup(text, 'lxml')\n\n        container = soup.select('a.pagination-page')\n\n        last_button = container[-1]\n\n        href = last_button.get('href')\n        if not href:\n            return 1\n        r = urllib.parse.urlparse(href)\n\n        params = urllib.parse.parse_qs(r.query)\n\n        return int(params['p'][0])\n\n    def get_blocks(self, page: int = None):\n        text = self.get_page(page=page)\n        soup = bs4.BeautifulSoup(text, 'lxml')\n\n        container = soup.select('div.iva-item-root-G3n7v.photo-slider-slider-15LoY.iva-item-list-2_PpT.items-item-1Hoqq.items-listItem-11orH.js-catalog-item-enum')\n        for item in container:\n            block = self.parse_block(item=item)\n            print(block)\n\n    def parse_all(self):\n        limit = self.get_pagination_limit()\n        for i in range(1, limit + 1):\n            print(\"Total pages: {}\".format(limit))\n            self.get_blocks(page=i)\n\n\ndef main():\n    p = AParcer()\n    # p.get_blocks()\n    p.parse_all()\n\n    # p.get_pagination_limit()\n\n\nif __name__ == \"__main__\":\n    main()\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 3603, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "collections.namedtuple", "line_number": 8, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 20, "usage_type": "call"}, {"api_name": "dateparser.parse", "line_number": 66, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 78, "usage_type": "call"}, {"api_name": "urllib.parse.parse.urlparse", "line_number": 87, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 87, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 87, "usage_type": "name"}, {"api_name": "urllib.parse.parse.parse_qs", "line_number": 89, "usage_type": "call"}, {"api_name": "urllib.parse.parse", "line_number": 89, "usage_type": "attribute"}, {"api_name": "urllib.parse", "line_number": 89, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 95, "usage_type": "call"}]} +{"seq_id": "207357634", "text": "\"\"\"\nSQLConnect\n----------\nA structured and pre-set way to connect to the server and load the data succinctly.\n\nLoading Tips:\n-----\n- To load data into SQL Server with data type date.\n    - df.date = df.date.apply(lambda x: str(x))\n    - df.date = df.date.apply(lambda x: x.strftime('%Y-%m-%d'))\n    \n    \nNotes:\n - getTableInfo will be used from MssqlFrameQuery\n\"\"\"\nimport os as _os\nimport getpass as __getpass__\nimport pandas as _pd\nimport numpy as __np__\nimport datetime as __dt__\nimport re as __re__\nimport warnings as __warnings__\nimport sys as __sys__\nfrom cpug.utils.reference import TickTock as __TT__\nimport sqlite3 as _sql3\nimport pyodbc as __odbc__\n\n__all__ = [\n    'ServerConfig' \n]\n\nclass ServerConfig:\n    \n    def __init__(self): \n        \n        # get cpug path info\n        # Any attributes set to None are assigned later in the code\n        self.cpug_path = self._get_cpug_path() \n        self.file_path = _os.path.join(self.cpug_path, 
\"server_info.txt\")\n self.df_servers = None\n \n # creates the file path if it does not exist\n if _os.path.exists(self.cpug_path):\n print(f\"{__dt__.datetime.today()}: \\n\\tServerCofig Location: {self.file_path}\")\n self.__update_df_servers()\n \n else:\n # it creates the path if it does not exist, not the file\n _os.mkdir(self.cpug_path)\n print(\"Server file was created!\")\n \n # fills the df_server object at init\n self.__update_df_servers()\n \n def _get_cpug_path(self):\n \n # Extract the user name\n username = __getpass__.getuser()\n \n # The cpug path in the user path\n cpug_path = _os.path.join(r'C:\\Users', username, '.cpug')\n \n return cpug_path\n \n def edit_server(self, server_num_name, server_name):\n \n if isinstance(server_num_name, int):\n server_num_name = f\"Server{server_num_name}\"\n \n # deltes the server\n self.delete_server(server_num_name=server_num_name)\n \n # saves the server not necessarily with the same server number\n self.save_server(server_name)\n \n def delete_server(self, server_num_name):\n \n if isinstance(server_num_name, int):\n server_num_name = f\"Server{server_num_name}\"\n \n # deletes server from dataframe\n self.df_servers.drop(server_num_name, inplace=True)\n \n # re-numbering\n server_range = __np__.arange(1, self.df_servers.shape[0] + 1)\n self.df_servers.index = [f\"Server{x}\" for x in server_range]\n self.df_servers.index.name = \"ServerNumber\"\n \n # update the flat file\n self.__update_server_list__()\n self.__update_df_servers()\n \n self.servers_show()\n \n def set_default(self, server_num_name):\n \n if isinstance(server_num_name, int):\n server_num_name = f\"Server{server_num_name}\"\n \n # extracte server 1\n server1 = self.df_servers.iloc[0].values[0].replace(' ', '')\n \n # extract new default server\n server_index_list = self.df_servers.index.tolist()\n new_def_idx = server_index_list.index(server_num_name)\n new_default_server = self.df_servers.iloc[new_def_idx]\n \n # swap servers\n self.df_servers.iloc[0] = new_default_server\n self.df_servers.iloc[new_def_idx] = server1\n \n self.__update_server_list__()\n self.__update_df_servers()\n \n self.servers_show()\n \n def clear_servers(self):\n \n with open(self.file_path, \"w\") as f:\n f.close()\n \n def servers_show(self): \n\n display(self.df_servers)\n \n def _get_saved_server(self, server_number=1):\n \"\"\"\n \n \"\"\"\n server_num_name = \"Server%s\" % server_number\n df_server = self.df_servers\n server = df_server.loc[server_num_name, 'ServerName']\n \n return server \n \n def __update_server_list__(self):\n # preprocessing server write\n df = self.df_servers.copy()\n df = df.reset_index()\n new_server_list = ((df.ServerNumber + ':').str.cat(df.ServerName) + '\\n').values.tolist()\n \n # re-writes the file\n with open(self.file_path, \"w\") as f:\n f.writelines(new_server_list)\n f.close()\n \n def __update_df_servers(self):\n pd = _pd\n \n # open the existing file\n try:\n with open(self.file_path, \"r\") as f:\n r = f.read()\n list_list = [x.split(':') for x in r.split('\\n') if x != '']\n \n self.df_servers = pd.DataFrame(list_list, \n columns=['ServerNumber', 'ServerName'])\\\n .set_index('ServerNumber')\n except:\n first_time = open(self.file_path, 'w+')\n first_time.close()\n \n def save_server(self, server_name, as_default=False):\n \n # The cpug path in the user path\n server = server_name\n cpug_path = self.cpug_path\n \n server_text_path = _os.path.join(cpug_path, 'server_info.txt')\n server_file_cond = _os.path.exists(server_text_path) \n \n if not 
server_file_cond:\n            # it creates the server_info file if it does not exist\n            create = open(server_text_path, 'w')\n            create.close()\n        \n        with open(server_text_path, 'r+') as s:\n            lines = s.readlines()\n            new_server_num = len(lines) + 1\n            \n            # if the server is not already saved\n            if (''.join(lines).find(server)) < 0: \n                s.writelines([f\"Server{new_server_num}: {server}\\n\"])\n                print(f\"\\tServer{new_server_num}: {server} was successfully saved! :)\")\n                s.close()\n                self.__update_df_servers()\n                if not as_default: self.servers_show()\n            else:\n                if server != '': \n                    print(\"\\nThe server already exists in the list.\")\n        \n        if as_default:\n            self.set_default(f\"Server{new_server_num}\")\n        \nclass SqlManagement():\n    \n    def __init__(self, \n                 engine='msserver',\n                 server_name=None,\n                 use_database='master',\n                 userid=None,\n                 password=None,\n                 port=None,\n                 ref_schema=None,\n                 ref_table=None,\n                 conn=None):\n        \"\"\"\n        SqlManagement is a pyodbc wrapper designed to extract data with ease for data analytics using python.\n        parameters\n        ----------\n        engine : the type of server connection to make (redshift or msserver) -default=msserver\n        server_name : the name or endpoint name of the server to connect to\n        use_database : the database to access in the server -default=master\n            Note: redshift does not have master it has public\n        userid : the user name with access to the server (admin or user)\n            Note: only needed if the access is not a trusted connection\n        password : the password credentials to access the server\n            Note: only needed if the access is not a trusted connection\n        port : the port number in which the server is communicating\n        ref_schema : the reference schema to use for fast reference using ref_table in the select\n        ref_table : the reference table to use for fast reference using ref_table in the select\n        conn : to reuse any other connection without the need of entering parameters\n        \"\"\"\n        \n        # attributes\n        # ------------------------------------------------\n        self.engine = engine\n        self.server_name = server_name\n        self.database = use_database\n        self.userid = userid\n        self.password = password\n        self.port = port \n        self.schema = ref_schema\n        self.table = ref_table\n        self.schema, self.table = self.__infer_schema_table__()\n        self.schema_table = '.'.join([self.schema, self.table]) if self.table and self.schema else None\n        self.driver = None # to be set in __create_connection__\n        self.driver_dict = dict(msserver=['17', '13.1', '13', '11'], redshift=['x64'])\n        self.subquery_path = None\n        self.autocommit = False\n        self.subquery = None\n        self.suppress_error = False # consulted by run_sql's error handling; was previously never initialized\n        \n        # initiate connection to server\n        self.conn = self.__create_connection__()\n        \n    # INTERNAL METHODS -------------------------------------------------------- \n    def __create_connection__(self):\n        \"\"\"\n        \n        \"\"\"\n        # extracting package for internal use\n        odbc = __odbc__ \n\n        # extracting attributes\n        engine = self.engine\n        server = self.server_name\n        database = self.database\n        userid = self.userid\n        password = self.password\n        port = self.port\n        autocommit = self.autocommit\n        driver_dict = self.driver_dict \n\n        # error handling\n        if server == None:\n            raise ValueError(\"No server provided. 
Please assign the name of the server using server_name=\") \n\n        # try all drivers\n        for driver_type in driver_dict.get(engine):\n            driver = f\"ODBC Driver {driver_type} for SQL Server\" \\\n                        if engine == 'msserver' \\\n                        else f\"Amazon Redshift ({driver_type})\"\n            \n            # sets the connection string\n            if engine == 'msserver':\n                conn_string = f\"Driver={driver};\\n\\tServer={server};\\n\\t\"+\\\n                                f\"Trusted_Connection=yes;\\n\\tDatabase={database}\"\n            elif engine == 'redshift':\n                conn_string = f\"Driver={driver}; Server={server}; Database={database};\"+\\\n                                f\"UID={userid}; PWD={password}; Port={port}\"\n\n            try:\n                conn = odbc.connect(conn_string, autocommit=autocommit)\n                print(f'\\n\\tCONNECTION DESCRIPTION: \\n\\t{conn_string}')\n                return conn\n            except:\n                # report the failure but keep trying the remaining drivers\n                print(f\"Your connection string failed. Make sure you have all the parameters. {conn_string}\")\n\n    \n    def __infer_schema_table__(self, schema=None, table=None):\n        \"\"\"\n        To infer the schema and table from the given reference table. \n        E.g. ref_table = dbo.tablename\n        \"\"\"\n        if not schema and not table:\n            schema = self.schema\n            table = self.table\n        \n        # split the given fully qualified table\n        try:\n            if table.find('.') >= 0:\n                schema = table.split('.')[-2]\n                table = table.split('.')[-1] \n        except:\n            pass\n        \n        return schema, table\n    \n    def __read_sql_file__(self, file_path):\n        os = _os\n        \n        self.subquery_path = file_path\n        # read the file and set the script\n        \n        if os.path.exists(file_path) and file_path.find('.sql'):\n            # find the query file and extract the subquery\n            # setting the file path\n            with open(file_path, 'r') as file:\n                select_script = file.read()\n            return select_script\n        else:\n            return None\n    \n    # MAIN METHODS ------------------------------------------------------------ \n    def reconnect(self, use_database=None):\n        \"\"\"To reconnect to a specific database without re-running the\n        MssqlManagement object.\n        \"\"\"\n        self.database = use_database or self.database\n        self.conn.close()\n        self.conn = self.__create_connection__()\n    \n    def create_sql_file(self, filepath, sql_statement):\n        \"\"\"\n        To create a sql file in the stated path provided.\n        \n        parameters\n        ----------\n        filepath : the path of the file to be created including the file name \n        sql_statement : the sql statement to be created in the sql file\n        \n        returns : None\n        Outputs : Creates an sql file with the sql_statement provided in the provided filepath.\n        \"\"\"\n        if '.sql' not in filepath: filepath += '.sql'\n        with open(filepath, 'w+') as sql:\n            sql.write(sql_statement)\n            sql.close()\n    def subquery_show(self):\n        if self.subquery == None:\n            msg = \"There is no reference subquery. Create one by calling the createOrReplaceSubRef method.\"\n            print(msg)\n        else:\n            print(self.subquery)\n    \n    def subquery_open(self):\n        os = _os\n        if self.subquery_path == None:\n            msg = \"There is no reference subquery. 
Create one by calling the createOrReplaceSubRef method.\"\n            print(msg)\n        else:\n            os.startfile(self.subquery_path)\n    \n    def execute_sql(self, statement): \n        \"\"\"\n        \n        \"\"\"\n        cursor = self.conn.cursor()\n        cursor.execute(statement)\n        cursor.commit()\n    \n    def append_list(self, table_name, value_list):\n        \"\"\"\n        \n        \"\"\" \n        cursor = self.conn.cursor() \n        \n        string_func = lambda x: f\"'{x}'\" if x != None else f\"Null\"\n        convert = ', '.join(list(map(string_func, value_list)))\n        insert = f\"({convert})\"\n        \n        columns = ', '.join(self.run_sql(F\"\"\"\n        SELECT *\n        FROM {table_name}\n        \"\"\").columns.tolist())\n        \n        insert_statement = f\"\"\"\n        INSERT INTO {table_name} ({columns})\n        VALUES {insert}\n        \"\"\" \n        cursor.execute(insert_statement)\n        cursor.commit()\n    \n    def insert_frame(self, \n                     dataframe, \n                     table_name, \n                     truncate=False, \n                     fast=False, \n                     create_table=False, \n                     varchar_size=150):\n        \"\"\"\n        To insert data in the form of a pandas dataframe.\n        NOTE: the pandas data types need to agree with the SQL data types.\n        \n        parameters\n        ----------\n        dataframe : the full dataframe to insert\n        table_name : the destination table to insert into. E.g. dbo.*\n        truncate : truncate the table before insert ~default False\n        fast : the speed at which the data is inserted can be increased by setting it\n            to True. WARNING! It uses more memory. ~default False\n        \"\"\"\n        cursor = self.conn.cursor()\n        \n        # converting nans to none for null insert\n        dataframe = dataframe.where(~dataframe.isna(), None)\n        dataframe = dataframe.where(lambda x: x!='None', None)\n        \n        # preparing the insert statement\n        column_list = dataframe.columns.tolist()\n        insert_columns = ', '.join(column_list)\n        insert_placements = ', '.join(['?'] * len(column_list))\n        \n        # CREATE TABLE\n        if create_table == True:\n            create_fields = ',\\n'.join(list(\n                map(lambda x: f\"\\t{x} VARCHAR({varchar_size}) NULL\", column_list)\n            ))\n            create_statement = f\"\"\"CREATE TABLE {table_name} (\n            {create_fields}\n            );\n            \"\"\"\n            \n            self.create_table(create_statement, recreate=True)\n            if varchar_size == 150:\n                print(\"*using default VARCHAR(150) to create the table for all columns.\")\n        \n        # INSERT\n        insert_statement = f\"\"\"\n        INSERT INTO {table_name} ({insert_columns})\n        VALUES ({insert_placements});\n        \"\"\"\n        \n        if truncate == True and create_table == False:\n            # there is no need to truncate if a new table is created\n            truncate_statement = f\"\"\"\n            TRUNCATE TABLE {table_name}\n            \"\"\"\n            cursor.execute(truncate_statement)\n            cursor.commit()\n            msg = \"The %s table was truncated.\" % table_name\n            print(msg)\n        \n\n        cursor.fast_executemany = fast\n        print(f\"\\nStarting the insert {'in' if fast else 'not in'} fast mode...\")\n        cursor.executemany(insert_statement, dataframe.values.tolist()) \n        cursor.commit()\n        \n        msg = f\"Data successfully inserted into {table_name}.\" \n        print(msg)\n    \n    def create_table(self, statement, recreate=False):\n        re = __re__\n        warning = __warnings__\n        \n        cursor = self.conn.cursor()\n        try:\n            table_name = re.search('(\\S..)[.](\\S+.+?)', statement).group().replace(' ', '')\n            table_exist = self.getTableInfo(table_name).shape[0] > 0 \n        except:\n            msg = \"Might be missing the schema. E.g. dbo, etl, raw, etc.\"\n            raise ValueError(msg)\n        create_msg = 'created'\n        \n        if recreate: \n            drop_statement = f\"\"\"\n            DROP TABLE {table_name};\n            \"\"\"\n            \n            if table_exist:\n                try:\n                    cursor.execute(drop_statement)\n                    cursor.commit()\n                    create_msg = 'recreated'\n                except:\n                    msg = \"Could not drop %s. 
Verify if there is a foreign key related to this table.\" % table_name\n                    warning.warn(msg)\n        \n        cursor.execute(statement)\n        cursor.commit()\n        \n        msg = \"Table %s has been successfully %s.\" % (table_name, create_msg)\n        print(msg)\n        return self.getTableInfo(table_name)\n    \n    def createOrReplaceRefTable(self, ref_table):\n        \"\"\"\n        Creates the reference table to be queried as ref_table.\n        \"\"\"\n        self.schema, self.table = self.__infer_schema_table__(table=ref_table)\n        self.schema_table = self.schema + '.' + self.table\n        \n        print(f\"\\n\\tReference to table is set. Use FROM ref_table to access it.\")\n    \n    def createOrReplaceSubRef(self, subquery, show_query=False):\n        \"\"\"\n        Creates the reference subquery to be queried as ref_table\n        \"\"\" \n        # read the file and set the script\n        subquery = self.__read_sql_file__(subquery) or subquery\n        \n        self.subquery = f\"({subquery}) as sub\"\n        \n        if not show_query:\n            print(\"\\n\\tReference to subquery is set. Use FROM ref_subquery to access it.\")\n        else:\n            print(f\"ref_subquery: \\n{self.subquery}\")\n    \n    def getMasterInfo(self):\n        \n        print(f\"Showing master databases from {self.server_name}\")\n        return self.run_sql(\"\"\"\n        SELECT name, database_id, create_date \n        FROM sys.databases\n        \"\"\", show_stats=False)\n    \n    def getTableInfo(self, table=None, schema=None, look_words=None): # -----\n        \"\"\"\n        To get information about tables in a database or columns in a specific table\n        \n        parameters\n        ----------\n        table : the table from which to query information\n        schema : the schema of the table from which to query information\n        look_words : parts of description to look for in the table/columns\n        \n        Note: leaving table and schema empty will make the method \n            look for table information\n        \"\"\"\n        pd = _pd\n        np = __np__\n        re = __re__\n        warnings = __warnings__\n        \n        schema, table = self.__infer_schema_table__(schema, table)\n        \n        if schema!=None and table!=None:\n            table_fields = \"\"\"TABLE_CATALOG, COLUMN_NAME, TABLE_SCHEMA, DATA_TYPE, \n                              ISNULL(CHARACTER_MAXIMUM_LENGTH, 0) +\n                              ISNULL(NUMERIC_PRECISION, 0) + \n                              ISNULL(DATETIME_PRECISION, 0) AS DATA_SIZE, IS_NULLABLE\"\"\"\n            where_clause = f\"WHERE TABLE_NAME='{table}' AND TABLE_SCHEMA='{schema}'\"\n        else: \n            msg = \"No reference table provided\"\n            warnings.warn(msg)\n            table_fields = \"DISTINCT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME\"\n            where_clause = \"\"\n        \n        sql_columns = f\"\"\"\n        SELECT \n            {table_fields} \n        FROM INFORMATION_SCHEMA.COLUMNS\n        {where_clause}\n        \"\"\"\n        \n        # Get columns and their information\n        df = pd.read_sql_query(sql_columns, self.conn)\n        pd.options.display.max_rows = 200\n        \n        if look_words:\n            look_words = str(look_words)\n            show_idx = df.astype(str).apply(\n                lambda x: x.str.contains(\n                    look_words, regex=True, flags=re.IGNORECASE)).where(\n                        lambda x: x==True).dropna(how='all').index \n            \n            # Words found in a string\n            words_found_string = ' '.join(\n                np.unique(df.astype(str).values.reshape(1, -1)))\n            \n            words_searched = set(look_words.split('|'))\n            \n            look_words_fail = [x for x in words_searched \n                               if words_found_string.lower().find(x)<0]\n            \n            if len(look_words_fail) > 0:\n                print(f\"The following word(s) have/has no match: {look_words_fail}\")\n            \n            df_lookup = df.loc[show_idx, :]\n            \n            return df_lookup.set_index('TABLE_CATALOG')\n        \n        else:\n            return df.set_index('TABLE_CATALOG')\n    \n    def raise_query_runtime(self, df_output):\n        if isinstance(df_output, tuple): raise RuntimeError(df_output[1])\n    \n    def run_sql(self, \n                query, \n                add_string=None, \n                show_columns=False, \n                show_stats=True, \n                
show_query=False, \n                in_list=None,\n                select_list=None,\n                args={}):\n        \"\"\" \n        \"\"\"\n        pd = _pd\n        re = __re__\n        warnings = __warnings__\n        TickTock = __TT__\n        \n        query = self.__read_sql_file__(query) or query\n        \n        if self.schema_table == None \\\n            and query.find(\"ref_table\") >= 0 \\\n            and query.find(\"ref_subquery\") >=0:\n            raise ValueError(\"No table or reference was provided\")\n        \n        #################################################################\n        # COLUMNS AND LIST REPLACEMENTS\n        #################################################################\n        # Extracting lists and converting; rebuilding args as a new dict\n        # avoids mutating the shared mutable default argument across calls\n        if in_list == []: in_list = ['']\n        if in_list:\n            in_list = ', '.join([f\"'{item}'\" for item in in_list])\n            args = {'in_list': in_list, **args}\n        \n        if select_list == []: \n            select_list = ['Null']\n        if select_list:\n            select_list = ',\\n'.join(select_list)\n            args = {'select_list': select_list, **args}\n        \n        #################################################################\n        # TABLE REPLACEMENTS\n        ################################################################# \n        # subquery replacement\n        query = query.replace(\"ref_subquery\", self.subquery or \"\") \n        \n        # replace ref_table with the passed table\n        # if no schema and table given, use subquery\n        # this is useful when iterating on tables and subqueries by just\n        # using ref_table to refer to both\n        ref_table = self.schema_table if self.schema_table != None else self.subquery\n        query = query.replace(\"ref_table\", ref_table or \"\")\n        \n        # parameter replacements\n        query = query.format(**args) \n        \n        # string parameter replacements\n        if add_string:\n            query = query % add_string\n        #################################################################\n        # DATA EXTRACT\n        #################################################################\n        if show_stats: start = TickTock()\n        if show_query: print(query)\n        \n        try:\n            # get data from server\n            df = pd.read_sql_query(query, self.conn)\n            \n            if show_columns: \n                \n                try:\n                    # look for a table name\n                    from_statement = re.findall('\\s*(FROM.*.|from.*.|From.*.)', query, re.DOTALL)[0]\n                    table_name = from_statement.split()[1].strip()\n                    \n                    # show column structure regardless of whether it is a successful query\n                    drop_col = ['IS_NULLABLE', 'TABLE_SCHEMA']\n                    columns = self.getTableInfo(table_name).drop(drop_col, axis=1)\n                    print(\"These are the available columns. You are welcome! :)\\n\", \n                          columns) \n                except Exception as err:\n                    warnings.warn(str(err))\n            \n        except Exception as err: \n            msg = err.args\n            err_msg = msg[0].split(':')[1]\n            warnings.warn(msg[0]) \n            \n            err_code = ''.join(re.findall(\"\"\"[^()\\[\\]'\"]\"\"\", err_msg.split(',')[0]))\n            err_msg = ''.join(re.findall(\"\"\"[^()\\[\\]'\"]\"\"\", err_msg.split(',')[1]))\n            \n            # show column structure when query fails\n            try:\n                columns = self.getTableInfo(table_name).drop(drop_col, axis=1)\n                df = (err_code, err_msg)\n                if not self.suppress_error and len(columns) > 0:\n                    print(\"Auch! At least one field is wrong or no field was provided. Try from these:\\n\", columns)\n            except:\n                df = (err_code, err_msg)\n        \n        # show the time to process\n        if show_stats: \n            start.stop_timer(show_time=True)\n            print('Query completed. Connection open. 
Close connection by calling .close()')\n \n return df\n \n def close(self):\n self.conn.close()\n print(\"The connection is now closed.\")\n\n# adding other ways to run a query\nSqlManagement.sql = SqlManagement.run_sql\n \n \n", "sub_path": "connect/sql.py", "file_name": "sql.py", "file_ext": "py", "file_size_in_byte": 26108, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "os.path", "line_number": 39, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 44, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 44, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 49, "usage_type": "call"}, {"api_name": "getpass.getuser", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path", "line_number": 61, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 170, "usage_type": "call"}, {"api_name": "os.path", "line_number": 170, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 315, "usage_type": "call"}, {"api_name": "os.path", "line_number": 315, "usage_type": "attribute"}, {"api_name": "os.startfile", "line_number": 362, "usage_type": "call"}, {"api_name": "re.search", "line_number": 469, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 556, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 575, "usage_type": "attribute"}, {"api_name": "cpug.utils.reference.TickTock", "line_number": 614, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 671, "usage_type": "call"}, {"api_name": "re.DOTALL", "line_number": 671, "usage_type": "attribute"}, {"api_name": "warnings.warn", "line_number": 681, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 686, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 688, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 689, "usage_type": "call"}]} +{"seq_id": "623192762", "text": "import cv2\nimport os\nimport numpy as np\nimport math\n\nfor filePath in sorted(os.listdir(\"origin\")):\n fileName = os.path.splitext(filePath)[0]\n imagePath = os.path.join(\"origin\", filePath)\n src = cv2.imread(imagePath, cv2.IMREAD_COLOR)\n dst = src.copy()\n yuv_image = cv2.cvtColor(dst, cv2.COLOR_BGR2YUV)\n dst = np.uint64(dst)\n yuv_image = np.uint64(yuv_image)\n b, g, r = cv2.split(dst)\n y, u, v = cv2.split(yuv_image)\n\n b_mean = np.mean(b)\n g_mean = np.mean(g)\n r_mean = np.mean(r)\n b_var = np.var(b)\n g_var = np.var(g)\n r_var = np.var(r)\n b_sig = math.sqrt(b_var)\n g_sig = math.sqrt(g_var)\n r_sig = math.sqrt(r_var)\n\n y_mean = np.mean(y)\n u_mean = np.mean(u)\n v_mean = np.mean(v)\n y_var = np.var(y)\n u_var = np.var(u)\n v_var = np.var(v)\n y_sig = math.sqrt(y_var)\n u_sig = math.sqrt(u_var)\n v_sig = math.sqrt(v_var)\n\n #BG\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((b[i, j] - b_mean) * (g[i, j] - g_mean))\n cov = exp/(b.shape[0] * 
b.shape[1])\n corr = cov/(b_sig * g_sig)\n print(corr, end=\" \")\n print(\"BG\")\n\n #GR\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((g[i, j] - g_mean) * (r[i, j] - r_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(g_sig * r_sig)\n print(corr, end=\" \")\n print(\"GR\")\n\n #BR\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((b[i, j] - b_mean) * (r[i, j] - r_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(b_sig * r_sig)\n print(corr, end=\" \")\n print(\"BR\")\n\n #GY\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((g[i, j] - g_mean) * (y[i, j] - y_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(g_sig * y_sig)\n print(corr, end=\" \")\n print(\"GY\")\n\n #YU\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((y[i, j] - y_mean) * (u[i, j] - u_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(y_sig * u_sig)\n print(corr, end=\" \")\n print(\"YU\")\n\n #YV\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((y[i, j] - y_mean) * (v[i, j] - v_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(y_sig * v_sig)\n print(corr, end=\" \")\n print(\"YV\")\n\n #UV\n exp = 0\n for i in range(0, b.shape[0]):\n for j in range(0, b.shape[1]):\n exp = exp + ((u[i, j] - u_mean) * (v[i, j] - v_mean))\n cov = exp/(b.shape[0] * b.shape[1])\n corr = cov/(u_sig * v_sig)\n print(corr, end=\" \")\n print(\"UV\")\n\n print(\"----------------------------------\" + fileName)", "sub_path": "HW#11/RGB_YUV_form.py", "file_name": "RGB_YUV_form.py", "file_ext": "py", "file_size_in_byte": 2865, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.listdir", "line_number": 6, "usage_type": "call"}, {"api_name": "os.path.splitext", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 9, "usage_type": "call"}, {"api_name": "cv2.IMREAD_COLOR", "line_number": 9, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2YUV", "line_number": 11, "usage_type": "attribute"}, {"api_name": "numpy.uint64", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.uint64", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.split", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 22, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 23, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 24, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 28, "usage_type": "call"}, {"api_name": 
"numpy.mean", "line_number": 29, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.var", "line_number": 32, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 33, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 34, "usage_type": "call"}, {"api_name": "math.sqrt", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "458585329", "text": "\n# coding=utf-8\n\n__author__ = 'Kevin_Liao'\n__all__ = ['DataLink']\n\nimport time\n# from driver.logstack import LogPacket\nfrom driver.i2cstack import I2CPacket, I2CHeader\nfrom driver.crtpstack import CRTPPacket, CRTPHeader, CRTPPort\n# from ui.widgets.filesetting import FileConfig\nfrom driver.logger import Logger\nfrom lib import Library\n\nlogger = Logger(__name__)\nlib = Library()\n\nclass DataLink:\n\n __metaclass__ = lib.singleton()\n linkTimeoutCb = lib.caller()\n\n def __init__(self, parent, dongle = None):\n self.parent = parent\n self.parent.datalink = self\n self.setSpeed(0.001)\n self.setDongle(dongle) # 設定傳輸介面,ex: serial\n # Queue buffer\n self.outputThread = lib.queue(self, self.speed, self.output, 50) # 定時處理輸出資料\n self.inputThread = lib.queue(self, self.speed, self.input, 50) # 定時處理輸入資料 \n self.dongleThread = lib.timer(self, self.speed, self.dongleHandle, 50) # 定時接收 Dongle 的資料\n self.modulelink = {} # Module 連結\n\n # self.packet = lib.packet()\n self.crtpSend = lib.crtp()\n self.crtpReceive = lib.crtp()\n # self.packet.rfile.pull\n \n\n \n \n # self.packet.rfile.create(\"log/aaaa.txt\", self.packet.rfile.pull)\n # self.send([1,2,3,4])\n \n # data = \"][aa:123][bbb:245][fsfdfokfdfodkfodkfokdokfdofkodkfdkdofkofkdofk\"\n \n # data1 = lib.format.list(data)\n # self.logPacket.pushLog1(data1)\n # self.logPacket.pushLog1(data1)\n \n \n self.infile = None\n self.outfile = None\n self.stop()\n \n\n # self.testThread = lib.timer(self, 1, self.test, 5)\n # self.testThread.start()\n \"\"\"\n self.testThread.putQueue([1,1,1])\n self.testThread.putQueue([2,2,2])\n self.testThread.putQueue([3,3,3])\n self.testThread.putQueue([4,4,4])\n self.testThread.putQueue([5,5,5])\n self.testThread.putQueue([6,6,6])\n self.testThread.putQueue([7,7,7])\n self.testThread.putQueue([8,8,8])\n \"\"\"\n\n \n def getMenu(self, path):\n logger.error(\"menu: %s\" % path)\n \n def setDongle(self, dongle = None):\n self.dongle = dongle\n \n def getDongle(self):\n return self.dongle\n\n def setPacket(self, packet):\n self.packet = packet\n self.packet.comm.setCallback(self.send)\n \n \n self.packet.rfile.setFeedback(self.packet.rfile.menulist, self.getMenu)\n # self.packet.comm.setCallback(self.packet.comm.pull)\n lib.debug.addFun(self.send, args = [\"socket write\\r\\n\"])\n lib.debug.addFun(self.packet.rfile.create, args = [\"log/aaaa.txt\"])\n lib.debug.addFun(self.packet.rfile.setReadMenu, args = [\"d:/\", 1])\n lib.debug.addFun(self.packet.rfile.create, args = [\"log/log_update_wireok1.txt\"])\n lib.debug.addFun(self.packet.rfile.create, args = [\"log/log_update_wireok1.txt\", \"log/log_update_wireok1.txt\", \"log/log_update_wireok1.txt\"])\n lib.debug.addFun(self.send, args = [lib.format.char([2]) + \"{'bb': 4}\\n\" + lib.format.char([3]) + lib.format.char([2]) + \"{'aaa': 55}\\n\" + lib.format.char([3])])\n lib.debug.addFun(self.packet.push, args = [\"{'aa': 44, 'bb': 4}\\nxxx\\n[11,22]\\nxxx\\n[111,22\"])\n lib.debug.addFun(self.packet.push, args = [\"33,44]\\n\"])\n lib.debug.addFun(self.send, args = 
[\"help\\n\"])\n lib.debug.addFun(self.send, args = [\"StartYetiOnWifi\\n\"])\n lib.debug.addFun(self.send, args = [\"FctGetVersion\\n\"])\n lib.debug.addFun(lib.object.openWinCommonConsole, args = [\"Bluetooth\"])\n lib.debug.addFun(lib.main.testview, args = [])\n # lib.debug.addFun(self.packet.json.push, args = [\"33,44]\\n\"])\n \n \n \n def initDataLink(self):\n # 傳輸資訊設定\n self.token = [0xCC, 0xAA] # 建立識別碼\n self.tokenLength = len(self.token)\n self.tokenStack = [0x00] * self.tokenLength # 內容識別碼暫存\n self.datalist = [] # 內容暫存\n self.datalength = 0\n self.validToken = False\n # 計算資料 fps\n self.datainfps = 0\n self.dataincount = 0\n self.datainfpstime = int(lib.config.timer())\n self.dataoutfps = 0\n self.dataoutcount = 0\n self.dataoutfpstime = int(lib.config.timer())\n lib.status.changeFps(self.datainfps, self.dataoutfps)\n # timeout 設定\n self.error = 0\n self.linktime = lib.config.timer() # 資料補送 timeout\n self.timeout = False\n self.maxdatasize = 2000 # 每筆最大資料大小\n self.maxlinktime = 0.2 # 單位秒\n self.offsetbyte = self.crtpReceive.hbyte + self.crtpReceive.ibyte\n self.sbyte = self.offsetbyte + self.crtpReceive.sbyte\n\n def start(self): # 開始收發資料\n if lib.cfg.get(\"loginsave\", False): self.infile = lib.file(lib.cfg.get(\"login\", \"login.txt\", \"log\"), \"w\")\n else: self.infile = None\n if lib.cfg.get(\"logoutsave\", False): self.outfile = lib.file(lib.cfg.get(\"login\", \"logout.txt\", \"log\"), \"w\")\n else: self.outfile = None\n self.initDataLink()\n self.outputThread.start()\n self.inputThread.start()\n self.dongleThread.start()\n\n def stop(self): # 停止收發資料\n self.outputThread.stop()\n self.inputThread.stop()\n self.dongleThread.stop()\n self.initDataLink()\n if self.infile != None: self.infile.close()\n if self.outfile != None: self.outfile.close()\n\n def setTimeoutCb(self, callback):\n if callback != None:\n self.linkTimeoutCb.addCallback(callback)\n \n def checkTimeout(self):\n if self.timeout:\n try: self.linkTimeoutCb.call()\n except: pass\n return self.timeout\n\n def setSpeed(self, speed = 0.01):\n self.speed = speed\n \n def createDataLink(self, module): # 建立模組連線\n linklist = module.getModuleLink() # 取得模組 Link Data\n for link in linklist: # Link Port\n receivelist = self.modulelink.get(link, []) # 接收資料後傳遞給各模組\n try:\n receivelist.append(module) # 嘗試加入 module receivelink\n self.modulelink[link] = receivelist\n except: pass\n try:\n module.sendlinkcb.addCallback(self.send) # 嘗試加入 module sendlinkcb # 嘗試加入 module sendlinkcb\n except: pass\n\n def send(self, packet, log = None):\n if packet != None:\n self.outputThread.putQueue(packet)\n # logger.info(\"p: %s, %s\" % (packet, log))\n \n # if log and self.crtplink: self.setConsole(lib.format.copy(packet), log = True, input = False)\n # else: self.outputThread.putQueue(packet)\n\n def receive(self, link, packet): # 分配資料到各模組\n if link in self.modulelink:\n modulelist = self.modulelink[link] # 依 link 讀取每個接收模組\n for module in modulelist:\n if module.enabled:\n module.receivelink(packet)\n\n def input(self, packet): # 定時取出 Queue 資料\n crtp = self.crtpReceive\n [self.datainfpstime, self.dataincount, self.datainfps] = lib.config.fps(self.datainfpstime, self.dataincount)\n if self.datainfps != 0: lib.status.changeFps(self.datainfps, self.dataoutfps)\n # logger.info(packet)\n crtp.setPacket(packet, init = True)\n if crtp.port == crtp.console:\n stream = self.packet.push(crtp.data)\n crtp.setStream(stream) # 轉換 log 封包\n elif crtp.port == crtp.comm:\n self.packet.comm.receive(crtp.channel, crtp.packet)\n elif crtp.port == crtp.parame:\n 
pass\n        elif crtp.port == crtp.log:\n            pass\n        elif crtp.port == crtp.game:\n            pass\n        # logger.info(\"-----port: %d\" % crtp.port)\n        # logger.info(\"ch: %d\" % crtp.channel)\n        # if crtp.stream != None: logger.info(\"stream: %s\" % crtp.stream)\n        # logger.debug(\"data: %s\" % str(crtp.data))\n        # logger.info(\"datal: %s\" % crtp.datal)\n        self.receive(crtp.port, crtp)\n        self.receive(crtp.all, crtp)\n\n    def output(self, packet): # 定時取出 Queue 資料\n        crtp = self.crtpSend\n        [self.dataoutfpstime, self.dataoutcount, self.dataoutfps] = lib.config.fps(self.dataoutfpstime, self.dataoutcount)\n        if crtp.verify(packet): link = True\n        else: link = False\n        datalist = self.encoder(packet, link)\n        if self.dongle != None:\n            try: self.dongle.write(lib.format.char(datalist))\n            except Exception as e:\n                logger.info(\"W: %s\" % e)\n                pass\n\n    def setConsole(self, data, log = False, input = True):\n        if len(data):\n            if data[-1] == 10: # 碰到換行轉換為 Console 發送\n                crtp = lib.crtp()\n                crtp.setDataList(crtp.console, 1, data, 0)\n            elif log == True or len(data) >= self.maxdatasize: # 單筆超過一定資料量為 Logging 發送\n                crtp = lib.crtp()\n                crtp.setDataList(crtp.console, 2, data, 0)\n            else:\n                return\n            if input: self.inputThread.putQueue(crtp.packet)\n            else: self.outputThread.putQueue(crtp.packet)\n            lib.config.delete(data)\n\n    def decoder(self, rawdata): # datalink 解碼\n        rawdata = lib.format.list(rawdata)\n        for data in rawdata:\n            self.tokenStack[0:self.tokenLength-1] = self.tokenStack[1:self.tokenLength] # 將 byte1 byteN 資料往前取代 byte0 byteN-1\n            self.tokenStack[self.tokenLength-1] = data # 將最新 data 取代原本 byte2\n            if self.tokenStack == self.token: # 檢查是否吻合開頭 token 資料\n                self.datalist = [] # 清空資料\n                self.validToken = True # 是 datalink 模式\n                self.datalength = 0\n                self.datasize = []\n            elif self.validToken == True:\n                self.datalist.append(data) # 塞入資料\n                length = len(self.datalist) # 計算 data 長度\n                if length <= self.offsetbyte: # 擷取 header + index\n                    pass\n                elif length <= self.sbyte: # 擷取 size\n                    self.datasize.append(data)\n                    datasize = lib.format.getByte(self.datasize)\n                    self.datalength = self.sbyte + datasize # header + index + size + datasize\n                elif length <= self.datalength: # 擷取數據\n                    pass\n                else: # 擷取 checksum\n                    self.validToken = False # 取消擷取資料\n                    checksum = lib.format.checksum(self.datalist[:-1])\n                    if checksum == self.datalist[self.datalength]: # 比對 checksum 確認資料是否正確\n                        self.datalist = self.datalist[0:-1]\n                        self.inputThread.putQueue(self.datalist)\n                        self.datalist = []\n            else: # 如果是非 CRTP 封包,設定為 Console 或是 Log 模式\n                self.datalist.append(data)\n                self.setConsole(self.datalist)    \n\n    def encoder(self, rawdata, link = False): # datalink 編碼\n        rawdata = lib.format.copy(lib.format.list(rawdata))\n        if link: # 建立識別碼 + 資料 + checksum\n            checksum = lib.format.checksum(rawdata)\n            datalist = lib.format.copy(self.token) + rawdata + [checksum]\n        else: # 只傳送原始資料\n            datalist = rawdata\n        return datalist\n\n    def dongleHandle(self): # 接收來自於 Dongle 資料    \n        if self.dongle != None:\n            [status, data] = [None, None]\n            try: [status, data] = self.dongle.read() # status, None = timeout, True = with token, False = without token\n            except Exception as e:\n                logger.info(\"dongleError: %s\" % e)\n                self.dongleError()\n            if status == None: # 當狀態為 None 時即 timeout\n                self.timeout = True\n                self.checkTimeout()\n            elif self.checkTimeout() == False and data != None and len(data):\n                self.linktime = lib.config.timer()\n                if status == True: self.decoder(data) # if status = True 則有包含識別碼 token,if status = False 則不包含識別碼 token\n                else: self.inputThread.putQueue(lib.format.list(data))\n            elif lib.config.timer() - self.linktime 
> self.maxlinktime and len(self.datalist): # 當超過 timeout 時間,剩餘資料直接輸入\n self.setConsole(self.datalist, log = True)\n elif len(self.datalist) == 0:\n self.linktime = lib.config.timer()\n\n def dongleError(self):\n if self.error >= 10:\n logger.error(self.error)\n self.setConsole(self.datalist, log = True) # dongle 發生錯誤時將剩餘資料以 Log 模式輸入\n self.timeout = True\n else:\n self.error = self.error + 1 # 累計錯誤\n \n def close(self):\n self.outputThread.close()\n self.inputThread.close()\n self.dongleThread.close()", "sub_path": "source/ui/comms/datalink.py", "file_name": "datalink.py", "file_ext": "py", "file_size_in_byte": 16450, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "driver.logger.Logger", "line_number": 15, "usage_type": "call"}, {"api_name": "lib.Library", "line_number": 16, "usage_type": "call"}, {"api_name": "lib.singleton", "line_number": 20, "usage_type": "call"}, {"api_name": "lib.caller", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.queue", "line_number": 29, "usage_type": "call"}, {"api_name": "lib.queue", "line_number": 30, "usage_type": "call"}, {"api_name": "lib.timer", "line_number": 31, "usage_type": "call"}, {"api_name": "lib.crtp", "line_number": 35, "usage_type": "call"}, {"api_name": "lib.crtp", "line_number": 36, "usage_type": "call"}, {"api_name": "lib.debug.addFun", "line_number": 87, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 87, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 88, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 88, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 89, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 89, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 90, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 90, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 91, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 91, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 92, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 92, "usage_type": "attribute"}, {"api_name": "lib.format.char", "line_number": 92, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 92, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 93, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 93, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 94, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 94, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 95, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 95, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 96, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 96, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 97, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 97, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 98, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 98, "usage_type": "attribute"}, {"api_name": "lib.object", "line_number": 98, "usage_type": "attribute"}, {"api_name": "lib.debug.addFun", "line_number": 99, "usage_type": "call"}, {"api_name": "lib.debug", "line_number": 99, "usage_type": "attribute"}, {"api_name": "lib.main", "line_number": 99, 
"usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 115, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 115, "usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 118, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 118, "usage_type": "attribute"}, {"api_name": "lib.status.changeFps", "line_number": 119, "usage_type": "call"}, {"api_name": "lib.status", "line_number": 119, "usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 122, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 122, "usage_type": "attribute"}, {"api_name": "lib.cfg.get", "line_number": 130, "usage_type": "call"}, {"api_name": "lib.cfg", "line_number": 130, "usage_type": "attribute"}, {"api_name": "lib.file", "line_number": 130, "usage_type": "call"}, {"api_name": "lib.cfg.get", "line_number": 132, "usage_type": "call"}, {"api_name": "lib.cfg", "line_number": 132, "usage_type": "attribute"}, {"api_name": "lib.file", "line_number": 132, "usage_type": "call"}, {"api_name": "lib.config.fps", "line_number": 189, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 189, "usage_type": "attribute"}, {"api_name": "lib.status.changeFps", "line_number": 190, "usage_type": "call"}, {"api_name": "lib.status", "line_number": 190, "usage_type": "attribute"}, {"api_name": "lib.config.fps", "line_number": 214, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 214, "usage_type": "attribute"}, {"api_name": "lib.format.char", "line_number": 219, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 219, "usage_type": "attribute"}, {"api_name": "lib.crtp", "line_number": 227, "usage_type": "call"}, {"api_name": "lib.crtp", "line_number": 230, "usage_type": "call"}, {"api_name": "lib.config.delete", "line_number": 236, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 236, "usage_type": "attribute"}, {"api_name": "lib.format.list", "line_number": 239, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 239, "usage_type": "attribute"}, {"api_name": "lib.format.getByte", "line_number": 255, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 255, "usage_type": "attribute"}, {"api_name": "lib.format.checksum", "line_number": 261, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 261, "usage_type": "attribute"}, {"api_name": "lib.format.copy", "line_number": 271, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 271, "usage_type": "attribute"}, {"api_name": "lib.format.list", "line_number": 271, "usage_type": "call"}, {"api_name": "lib.format.checksum", "line_number": 273, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 273, "usage_type": "attribute"}, {"api_name": "lib.format.copy", "line_number": 274, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 274, "usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 290, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 290, "usage_type": "attribute"}, {"api_name": "lib.format.list", "line_number": 292, "usage_type": "call"}, {"api_name": "lib.format", "line_number": 292, "usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 293, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 293, "usage_type": "attribute"}, {"api_name": "lib.config.timer", "line_number": 296, "usage_type": "call"}, {"api_name": "lib.config", "line_number": 296, "usage_type": "attribute"}]} +{"seq_id": 
"250159998", "text": "import os\nimport random\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nimport time\nimport torch\nfrom scipy.io import wavfile\n\nfrom torch_audiomentations import PolarityInversion\n\nSAMPLE_RATE = 16000\n\nBASE_DIR = Path(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))\nSCRIPTS_DIR = BASE_DIR / \"scripts\"\nTEST_FIXTURES_DIR = BASE_DIR / \"test_fixtures\"\n\n\nclass timer(object):\n \"\"\"\n timer: A class used to measure the execution time of a block of code that is\n inside a \"with\" statement.\n\n Example:\n\n ```\n with timer(\"Count to 500000\"):\n x = 0\n for i in range(500000):\n x += 1\n print(x)\n ```\n\n Will output:\n 500000\n Count to 500000: 0.04 s\n\n Warning: The time resolution used here may be limited to 1 ms\n \"\"\"\n\n def __init__(self, description=\"Execution time\", verbose=False):\n self.description = description\n self.verbose = verbose\n self.execution_time = None\n\n def __enter__(self):\n self.t = time.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.execution_time = time.time() - self.t\n if self.verbose:\n print(\"{}: {:.3f} s\".format(self.description, self.execution_time))\n\n\nif __name__ == \"__main__\":\n \"\"\"\n For each transformation, apply it to an example sound and write the transformed sounds to\n an output folder. Also crudely measure and print execution time.\n \"\"\"\n output_dir = os.path.join(SCRIPTS_DIR, \"output\")\n os.makedirs(output_dir, exist_ok=True)\n\n np.random.seed(42)\n random.seed(42)\n\n samples, _ = librosa.load(\n os.path.join(TEST_FIXTURES_DIR, \"acoustic_guitar_0.wav\"), sr=SAMPLE_RATE\n )\n\n transforms = [{\"instance\": PolarityInversion(p=1.0), \"num_runs\": 1}]\n\n execution_times = {}\n\n for transform in transforms:\n augmenter = transform[\"instance\"]\n run_name = (\n transform.get(\"name\")\n if transform.get(\"name\")\n else transform[\"instance\"].__class__.__name__\n )\n execution_times[run_name] = []\n for i in range(transform[\"num_runs\"]):\n output_file_path = os.path.join(\n output_dir, \"{}_{:03d}.wav\".format(run_name, i)\n )\n with timer() as t:\n augmented_samples = augmenter(\n samples=torch.from_numpy(samples), sample_rate=SAMPLE_RATE\n ).numpy()\n execution_times[run_name].append(t.execution_time)\n wavfile.write(output_file_path, rate=SAMPLE_RATE, data=augmented_samples)\n\n for run_name in execution_times:\n if len(execution_times[run_name]) > 1:\n print(\n \"{:<32} {:.3f} s (std: {:.3f} s)\".format(\n run_name,\n np.mean(execution_times[run_name]),\n np.std(execution_times[run_name]),\n )\n )\n else:\n print(\n \"{:<32} {:.3f} s\".format(run_name, np.mean(execution_times[run_name]))\n )\n", "sub_path": "scripts/demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 3063, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pathlib.Path", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 15, "usage_type": "call"}, {"api_name": "time.time", "line_number": 48, "usage_type": "call"}, {"api_name": "time.time", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 63, "usage_type": 
"call"}, {"api_name": "numpy.random.seed", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 65, "usage_type": "attribute"}, {"api_name": "random.seed", "line_number": 66, "usage_type": "call"}, {"api_name": "librosa.load", "line_number": 68, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "torch_audiomentations.PolarityInversion", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path", "line_number": 85, "usage_type": "attribute"}, {"api_name": "torch.from_numpy", "line_number": 90, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.write", "line_number": 93, "usage_type": "call"}, {"api_name": "scipy.io.wavfile", "line_number": 93, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 106, "usage_type": "call"}]} +{"seq_id": "535011072", "text": "from django.conf.urls.defaults import patterns, url\n\nurlpatterns = patterns('unfriendly.views',\n # Mostly unfriendly URL (but with SEO juice).\n url(r'^(?P.+)/(?P.+)/$', 'deobfuscate',\n name='unfriendly-deobfuscate'),\n\n # Extremely unfriendly URL (no SEO juice).\n url(r'^(?P.+)/$', 'deobfuscate', name='unfriendly-deobfuscate'),\n)\n", "sub_path": "unfriendly/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 366, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.defaults.patterns", "line_number": 3, "usage_type": "call"}, {"api_name": "django.conf.urls.defaults.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.defaults.url", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "286396519", "text": "from datetime import datetime\n\nimport argparse\nfrom pymongo import MongoClient\nfrom mongo_uri import mongo_uri\nclient = MongoClient(mongo_uri)\ndb = client['mack0242']\nstart = datetime(2012, 1, 1, 0, 0, 0)\nend = datetime(2015, 12, 12, 23, 59)\ndrange = {'$gte': start, '$lte': end}\n\n\ndef get_location(intersection):\n return db['locations'].find_one({'site_no': intersection})\n\n\ndef get_data(intersection, ds):\n if ds == 'sm':\n return db['scats_sm'].find({'site_no': intersection, 'datetime': drange}).sort({'sequence:1'})\n elif ds == 'vs':\n return db['scats_readings'].find({'site_no': intersection, 'datetime': drange}).sort('datetime')\n\n\ndef run(data, location, method,si, ds):\n if method == 'htm':\n from htm_model import run_model\n run_model(db['scats_anomalies'], data, location, si, ds)\n elif method == 's-h-esd':\n from shesd import run_model\n else:\n from sesd import run_model\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('method')\n parser.add_argument('intersection')\n parser.add_argument('dataset', choices=['sm', 'vs'])\n parser.add_argument('si',)\n args = parser.parse_args()\n location = get_location(args.intersection)\n data = get_data(args.intersection, args.dataset)\n\n run(data, location, args.method,args.si, args.dataset)\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1362, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": 
"pymongo.MongoClient", "line_number": 6, "usage_type": "call"}, {"api_name": "mongo_uri.mongo_uri", "line_number": 6, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 8, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 9, "usage_type": "call"}, {"api_name": "htm_model.run_model", "line_number": 27, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "422343331", "text": "from fileman.experiment_record import run_experiment\nfrom general.test_mode import is_test_mode, set_test_mode\nfrom plato.tools.cost import softmax_negative_log_likelihood\nfrom plato.tools.networks import MultiLayerPerceptron, normal_w_init\nfrom plato.tools.online_prediction.online_predictors import GradientBasedPredictor\nfrom plato.tools.optimizers import SimpleGradientDescent, AdaMax\nfrom utils.benchmarks.plot_learning_curves import plot_learning_curves\nfrom utils.benchmarks.predictor_comparison import compare_predictors\nfrom utils.benchmarks.train_and_test import percent_argmax_correct\nfrom utils.datasets.mnist import get_mnist_dataset\nfrom utils.tools.mymath import sqrtspace\n\n__author__ = 'peter'\n\n\n\"\"\"\nHere we run a comparison between SGD and Durk's new pet: AdaMax. We run them both on MNIST for 50 epochs.\n\"\"\"\n\n\ndef mnist_adamax_showdown(hidden_size = 300, n_epochs = 10, n_tests = 20):\n\n dataset = get_mnist_dataset()\n\n if is_test_mode():\n dataset.shorten(200)\n n_epochs = 0.1\n n_tests = 3\n\n make_mlp = lambda optimizer: GradientBasedPredictor(\n function = MultiLayerPerceptron(\n layer_sizes=[hidden_size, dataset.n_categories],\n input_size = dataset.input_size,\n hidden_activation='sig',\n output_activation='lin',\n w_init = normal_w_init(mag = 0.01, seed = 5)\n ),\n cost_function = softmax_negative_log_likelihood,\n optimizer = optimizer,\n ).compile()\n\n return compare_predictors(\n dataset=dataset,\n online_predictors = {\n 'sgd': make_mlp(SimpleGradientDescent(eta = 0.1)),\n 'adamax': make_mlp(AdaMax(alpha = 1e-3)),\n },\n minibatch_size = 20,\n test_epochs = sqrtspace(0, n_epochs, n_tests),\n evaluation_function = percent_argmax_correct\n )\n\n\ndef mlp_normalization(hidden_size = 300, n_epochs = 30, n_tests = 50, minibatch_size=20):\n \"\"\"\n Compare mlps with different schemes for normalizing input.\n\n regular: Regular vanilla MLP\n normalize: Mean-subtract/normalize over minibatch\n normalize and scale: Mean-subtract/normalize over minibatch AND multiply by a trainable\n (per-unit) scale parameter.\n\n Conclusions: No significant benefit to scale parameter. Normalizing gives\n a head start but incurs a small cost later on. 
But really all classifiers are quite similar.\n\n :param hidden_size: Size of hidden layer\n \"\"\"\n dataset = get_mnist_dataset()\n\n if is_test_mode():\n dataset.shorten(200)\n n_epochs = 0.1\n n_tests = 3\n\n make_mlp = lambda normalize, scale: GradientBasedPredictor(\n function = MultiLayerPerceptron(\n layer_sizes=[hidden_size, dataset.n_categories],\n input_size = dataset.input_size,\n hidden_activation='sig',\n output_activation='lin',\n normalize_minibatch=normalize,\n scale_param=scale,\n w_init = normal_w_init(mag = 0.01, seed = 5)\n ),\n cost_function = softmax_negative_log_likelihood,\n optimizer = SimpleGradientDescent(eta = 0.1),\n ).compile()\n\n return compare_predictors(\n dataset=dataset,\n online_predictors = {\n 'regular': make_mlp(normalize = False, scale = False),\n 'normalize': make_mlp(normalize=True, scale = False),\n 'normalize and scale': make_mlp(normalize=True, scale = True),\n },\n minibatch_size = minibatch_size,\n test_epochs = sqrtspace(0, n_epochs, n_tests),\n evaluation_function = percent_argmax_correct\n )\n\n\ndef run_and_plot(training_scheme):\n learning_curves = training_scheme()\n plot_learning_curves(learning_curves)\n\n\ndef get_experiments():\n training_schemes = {\n 'adamax-showdown': mnist_adamax_showdown,\n 'mlp-normalization': mlp_normalization\n }\n experiments = {name: lambda sc=scheme: run_and_plot(sc) for name, scheme in training_schemes.iteritems()}\n return experiments\n\n\nif __name__ == '__main__':\n\n test_mode = False\n experiment = 'mlp-normalization'\n\n set_test_mode(test_mode)\n run_experiment(experiment, exp_dict=get_experiments(), show_figs = True, print_to_console=True)\n", "sub_path": "plato/examples/demo_compare_optimizers.py", "file_name": "demo_compare_optimizers.py", "file_ext": "py", "file_size_in_byte": 4349, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "utils.datasets.mnist.get_mnist_dataset", "line_number": 23, "usage_type": "call"}, {"api_name": "general.test_mode.is_test_mode", "line_number": 25, "usage_type": "call"}, {"api_name": "plato.tools.online_prediction.online_predictors.GradientBasedPredictor", "line_number": 30, "usage_type": "call"}, {"api_name": "plato.tools.networks.MultiLayerPerceptron", "line_number": 31, "usage_type": "call"}, {"api_name": "plato.tools.networks.normal_w_init", "line_number": 36, "usage_type": "call"}, {"api_name": "plato.tools.cost.softmax_negative_log_likelihood", "line_number": 38, "usage_type": "name"}, {"api_name": "utils.benchmarks.predictor_comparison.compare_predictors", "line_number": 42, "usage_type": "call"}, {"api_name": "plato.tools.optimizers.SimpleGradientDescent", "line_number": 45, "usage_type": "call"}, {"api_name": "plato.tools.optimizers.AdaMax", "line_number": 46, "usage_type": "call"}, {"api_name": "utils.tools.mymath.sqrtspace", "line_number": 49, "usage_type": "call"}, {"api_name": "utils.benchmarks.train_and_test.percent_argmax_correct", "line_number": 50, "usage_type": "name"}, {"api_name": "utils.datasets.mnist.get_mnist_dataset", "line_number": 68, "usage_type": "call"}, {"api_name": "general.test_mode.is_test_mode", "line_number": 70, "usage_type": "call"}, {"api_name": "plato.tools.online_prediction.online_predictors.GradientBasedPredictor", "line_number": 75, "usage_type": "call"}, {"api_name": "plato.tools.networks.MultiLayerPerceptron", "line_number": 76, "usage_type": "call"}, {"api_name": "plato.tools.networks.normal_w_init", "line_number": 83, "usage_type": "call"}, 
{"api_name": "plato.tools.cost.softmax_negative_log_likelihood", "line_number": 85, "usage_type": "name"}, {"api_name": "plato.tools.optimizers.SimpleGradientDescent", "line_number": 86, "usage_type": "call"}, {"api_name": "utils.benchmarks.predictor_comparison.compare_predictors", "line_number": 89, "usage_type": "call"}, {"api_name": "utils.tools.mymath.sqrtspace", "line_number": 97, "usage_type": "call"}, {"api_name": "utils.benchmarks.train_and_test.percent_argmax_correct", "line_number": 98, "usage_type": "name"}, {"api_name": "utils.benchmarks.plot_learning_curves.plot_learning_curves", "line_number": 104, "usage_type": "call"}, {"api_name": "general.test_mode.set_test_mode", "line_number": 121, "usage_type": "call"}, {"api_name": "fileman.experiment_record.run_experiment", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "133776379", "text": "from typing import List\n\n\nclass Solution:\n # Merge Sort\n def sortArray(self, nums: List[int]) -> List[int]:\n if len(nums) <= 1:\n return nums\n\n pivot = int(len(nums) / 2)\n left = self.sortArray(nums[:pivot])\n right = self.sortArray(nums[pivot:])\n\n return self.merge(left, right)\n\n def merge(self, left: List[int], right: List[int]) -> List[int]:\n result = []\n lp = 0\n rp = 0\n\n while lp < len(left) and rp < len(right):\n if left[lp] < right[rp]:\n result.append(left[lp])\n lp += 1\n else:\n result.append(right[rp])\n rp += 1\n\n # append remaining\n result.extend(left[lp:])\n result.extend(right[rp:])\n\n return result\n", "sub_path": "0912_Sort_Array.py", "file_name": "0912_Sort_Array.py", "file_ext": "py", "file_size_in_byte": 803, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 16, "usage_type": "name"}]} +{"seq_id": "65982421", "text": "from django.conf.urls import patterns\nfrom django.conf.urls import url\n\nurlpatterns = patterns('geoforms.views',\n url(r'^active/$',\n 'get_active_questionnaires',\n name='active_questionnaires'),\n url(r'^(?P[\\w+(+-_)*]+)/feedback$',\n 'feedback',\n name=\"feedback\"),\n url(r'^(?P[\\w+(+-_)*]+)/$',\n 'questionnaire',\n name=\"questionnaire\"),\n )\n", "sub_path": "geoforms/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 5, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "573387523", "text": "import argparse\r\nimport sys\r\nimport os\r\nimport cv2\r\n\r\nfrom os import getcwd\r\nfrom os.path import join\r\nfrom FaceCropAlign import align_and_crop_face\r\n\r\n# construct the argument parser and parse the arguments\r\nap = argparse.ArgumentParser()\r\nap.add_argument(\"-i\", \"--input_dirs\", required=True,\r\n help=\"directory path to input images\")\r\nap.add_argument(\"-o\", \"--output_dirs\", required=True,\r\n help=\"directory path to output images\")\r\nargs = vars(ap.parse_args())\r\n\r\n# constant\r\ninput_dirs = join(os.getcwd(), args[\"input_dirs\"])\r\noutput_dirs = join(os.getcwd(), args[\"output_dirs\"])\r\n\r\n# Iterate all images in input_dirs\r\nimage_exts = ['jpg', 
'png']\r\nfile_list = [fn for fn in os.listdir(input_dirs)\r\n if fn.split(\".\")[-1] in image_exts]\r\nfor file_name in file_list:\r\n # Read\r\n input_path = join(input_dirs, file_name)\r\n print(\"Read \", input_path, \"...\")\r\n image = cv2.imread(input_path)\r\n\r\n # Process\r\n outputs = align_and_crop_face(image)\r\n\r\n # Write\r\n for i, output in enumerate(outputs):\r\n splited_file_name = file_name.split(\".\")\r\n only_name, ext = splited_file_name[0], '.'.join(splited_file_name[1:])\r\n output_path = join(output_dirs, '%s_%d.%s' % (only_name, i, ext))\r\n print(\"Write \", output_path, \"...\")\r\n cv2.imwrite(output_path, output)\r\n", "sub_path": "util/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 1350, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 11, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 19, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 19, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 20, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 20, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 30, "usage_type": "call"}, {"api_name": "FaceCropAlign.align_and_crop_face", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "80169410", "text": "import os\nimport re\nimport tempfile\n\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom fnmatch import fnmatch\nfrom subprocess import PIPE, Popen, call\n\nfrom tabimport import CSVImportedFile, FileFactory\n\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.core.files import File\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Value\nfrom django.db.models.functions import Concat\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404\nfrom django.urls import reverse\nfrom django.views.generic import FormView\n\nfrom candidats.models import Candidate\nfrom ..forms import StudentImportForm, UploadHPFileForm, UploadReportForm\nfrom ..models import (\n Corporation, CorpContact, Course, Klass, Option, Section, Student, Teacher,\n)\nfrom ..utils import is_int\n\n\nclass ImportViewBase(FormView):\n template_name = 'file_import.html'\n\n @staticmethod\n def _sanitize_date(txt):\n if txt == '':\n return None\n elif isinstance(txt, str):\n return datetime.strptime(txt, '%d.%m.%Y').date()\n\n def form_valid(self, form):\n upfile = form.cleaned_data['upload']\n is_csv = (\n upfile.name.endswith('.csv') or\n 'csv' in upfile.content_type or\n upfile.content_type == 'text/plain'\n )\n try:\n if is_csv:\n # Reopen the file in text mode\n upfile = open(upfile.temporary_file_path(), mode='r', encoding='utf-8-sig')\n imp_file = CSVImportedFile(File(upfile))\n else:\n imp_file = FileFactory(upfile)\n with transaction.atomic():\n stats = self.import_data(imp_file)\n except Exception as e:\n if settings.DEBUG:\n raise\n msg = \"L'importation a échoué. 
Erreur: %s\" % e\n if hasattr(upfile, 'content_type'):\n msg += \" (content-type: %s)\" % upfile.content_type\n messages.error(self.request, msg)\n else:\n non_fatal_errors = stats.get('errors', [])\n if 'created' in stats:\n messages.info(self.request, \"Objets créés : %d\" % stats['created'])\n if 'modified' in stats:\n messages.info(self.request, \"Objets modifiés : %d\" % stats['modified'])\n if non_fatal_errors:\n messages.warning(self.request, \"Erreurs rencontrées:\\n %s\" % \"\\n\".join(non_fatal_errors))\n return HttpResponseRedirect(reverse('admin:index'))\n\n\nclass StudentImportView(ImportViewBase):\n title = \"Importation étudiants EPC\"\n form_class = StudentImportForm\n # Mapping between column names of a tabular file and Student field names\n student_mapping = {\n 'ELE_NUMERO': 'ext_id',\n 'ELE_NOM': 'last_name',\n 'ELE_PRENOM': 'first_name',\n 'ELE_RUE': 'street',\n 'ELE_NPA_LOCALITE': 'city', # pcode is separated from city in prepare_import\n 'ELE_CODE_CANTON': 'district',\n 'ELE_TEL_PRIVE': 'tel',\n 'ELE_TEL_MOBILE': 'mobile',\n 'ELE_EMAIL_RPN': 'email',\n 'ELE_COMPTE_RPN': 'login_rpn',\n 'ELE_DATE_NAISSANCE': 'birth_date',\n 'ELE_AVS': 'avs',\n 'ELE_SEXE': 'gender',\n 'INS_CLASSE': 'klass',\n 'INS_MC': 'teacher',\n 'PROF_DOMAINE_SPEC': 'option_ase',\n }\n corporation_mapping = {\n 'ENT_NUMERO' : 'ext_id',\n 'ENT_NOM' : 'name',\n 'ENT_RUE': 'street',\n 'ENT_NPA': 'pcode',\n 'ENT_LOCALITE': 'city',\n 'ENT_TEL': 'tel',\n 'ENT_CODE_CANTON': 'district',\n }\n mapping_option_ase = {\n 'GEN': 'Généraliste',\n 'Enfants': 'Accompagnement des enfants',\n 'ENF': 'Accompagnement des enfants',\n 'HAN': 'Accompagnement des personnes handicapées',\n 'PAG': 'Accompagnement des personnes âgées',\n }\n # Those values are always taken from the import file\n fields_to_overwrite = ['klass', 'district', 'login_rpn']\n klasses_to_skip = []\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs.update({\n 'file_label': 'Fichier des étudiants',\n 'mandatory_headers': [k for k in self.student_mapping.keys() if k != 'INS_MC'],\n })\n return kwargs\n\n def clean_values(self, values):\n \"\"\"Post-process some of the imported values.\"\"\"\n if 'birth_date' in values:\n values['birth_date'] = self._sanitize_date(values['birth_date'])\n # See if postal code included in city, and split them\n if 'city' in values and is_int(values['city'][:4]):\n values['pcode'], _, values['city'] = values['city'].partition(' ')\n\n if 'klass' in values:\n if values['klass'] == '':\n values['klass'] = None\n else:\n try:\n k = Klass.objects.get(name=values['klass'])\n except Klass.DoesNotExist:\n raise Exception(\"La classe '%s' n'existe pas encore\" % values['klass'])\n values['klass'] = k\n\n if 'option_ase' in values:\n if values['option_ase']:\n try:\n values['option_ase'] = Option.objects.get(name=values['option_ase'])\n except Option.DoesNotExist:\n values['option_ase'] = None\n else:\n values['option_ase'] = None\n return values\n\n @property\n def _existing_students(self):\n return Student.objects.filter(\n archived=False,\n ext_id__isnull=False,\n klass__section__in=[s for s in Section.objects.all() if s.is_EPC]\n )\n\n def update_defaults_from_candidate(self, defaults):\n # Any DoesNotExist exception will bubble up.\n candidate = Candidate.objects.get(last_name=defaults['last_name'],\n first_name=defaults['first_name'])\n # Mix CLOEE data and Candidate data\n if candidate.option in self.mapping_option_ase:\n defaults['option_ase'] = 
Option.objects.get(name=self.mapping_option_ase[candidate.option])\n        if candidate.corporation:\n            defaults['corporation'] = candidate.corporation\n            defaults['instructor'] = candidate.instructor\n        defaults['dispense_ecg'] = candidate.exemption_ecg\n        defaults['soutien_dys'] = candidate.handicap\n\n    def import_data(self, up_file):\n        \"\"\" Import Student data from uploaded file. \"\"\"\n\n        def strip(val):\n            return val.strip() if isinstance(val, str) else val\n\n        obj_created = obj_modified = 0\n        err_msg = []\n        seen_students_ids = set()\n        existing_students_ids = set(\n            self._existing_students.values_list('ext_id', flat=True)\n        )\n        seen_klasses = set()\n        prof_dict = {str(t): t for t in Teacher.objects.all()}\n\n        for line in up_file:\n            student_defaults = {\n                val: strip(line.get(key, '')) for key, val in self.student_mapping.items()\n            }\n            if student_defaults['ext_id'] in seen_students_ids:\n                # Second line for student, ignore it\n                continue\n            if any(fnmatch(student_defaults['klass'], pattern)\n                   for pattern in self.klasses_to_skip):\n                continue\n            seen_students_ids.add(student_defaults['ext_id'])\n\n            if self.corporation_mapping:\n                corporation_defaults = {\n                    val: strip(line[key]) for key, val in self.corporation_mapping.items()\n                }\n                if isinstance(corporation_defaults['pcode'], float):\n                    corporation_defaults['pcode'] = int(corporation_defaults['pcode'])\n                student_defaults['corporation'] = self.get_corporation(corporation_defaults)\n\n            if 'option_ase' in self.fields_to_overwrite:\n                if student_defaults['option_ase'] in self.mapping_option_ase:\n                    student_defaults['option_ase'] = self.mapping_option_ase[student_defaults['option_ase']]\n\n            defaults = self.clean_values(student_defaults)\n\n            if defaults.get('teacher') and defaults['klass'] not in seen_klasses:\n                klass = defaults['klass']\n                for full_name in defaults['teacher'].split(', '):\n                    if 'Secrétariat' in full_name:\n                        continue\n                    # Set the teacher for this klass\n                    try:\n                        klass.teacher = prof_dict[full_name]\n                        klass.save()\n                    except KeyError:\n                        err_msg.append(\n                            \"L’enseignant {0} n'existe pas dans la base de données\".format(full_name)\n                        )\n                seen_klasses.add(klass)\n\n            try:\n                student = Student.objects.get(ext_id=defaults['ext_id'])\n                modified = False\n                for field_name in self.fields_to_overwrite:\n                    if getattr(student, field_name) != defaults[field_name]:\n                        setattr(student, field_name, defaults[field_name])\n                        modified = True\n                if student.archived:\n                    student.archived = False\n                    modified = True\n                if modified:\n                    student.save()\n                    obj_modified += 1\n            except Student.DoesNotExist:\n                try:\n                    self.update_defaults_from_candidate(defaults)\n                except Candidate.DoesNotExist:\n                    # New student with no matching Candidate\n                    err_msg.append('Étudiant non trouvé dans les candidats: {0} {1} - classe: {2}'.format(\n                        defaults['last_name'],\n                        defaults['first_name'],\n                        defaults['klass'])\n                    )\n\n                defaults.pop('teacher', None)\n                Student.objects.create(**defaults)\n                obj_created += 1\n\n        # Archive students who have not been exported\n        rest = existing_students_ids - seen_students_ids\n        archived = 0\n        for student_id in rest:\n            st = Student.objects.get(ext_id=student_id)\n            st.archived = True\n            st.save()\n            archived += 1\n        return {\n            'created': obj_created, 'modified': obj_modified, 'archived': archived,\n            'errors': err_msg,\n        }\n\n    def get_corporation(self, corp_values):\n        if corp_values['ext_id'] == '':\n            return None\n        if 'city' in corp_values and is_int(corp_values['city'][:4]):\n            corp_values['pcode'], _, corp_values['city'] = corp_values['city'].partition(' ')\n        try:\n            corp, created = 
Corporation.objects.get_or_create(\n ext_id=corp_values['ext_id'],\n defaults=corp_values\n )\n except IntegrityError:\n # It may happen that the corporation exists (name and city are enforced unique)\n # but without the ext_id. In that case, we update the ext_id.\n try:\n corp = Corporation.objects.get(name=corp_values['name'], city=corp_values['city'])\n if corp.ext_id:\n raise\n corp.ext_id = corp_values['ext_id']\n corp.save()\n except Corporation.DoesNotExist:\n raise\n except Corporation.MultipleObjectsReturned:\n raise ValueError(\n \"Il existe plusieurs institutions avec le numéro %s (%s, %s)\" % (\n corp_values['ext_id'], corp_values['name'], corp_values['city']\n ))\n return corp\n\n\nclass StudentEsterImportView(StudentImportView):\n title = \"Importation étudiants ESTER\"\n # Mapping between column names of a tabular file and Student field names\n student_mapping = {\n 'ELE_NUMERO': 'ext_id',\n 'ELE_NOM': 'last_name',\n 'ELE_PRENOM': 'first_name',\n 'ELE_RUE': 'street',\n 'ELE_NPA_LOCALITE': 'city', # pcode is separated from city in prepare_import\n 'ELE_DATE_NAISSANCE': 'birth_date',\n 'ELE_AVS': 'avs',\n 'ELE_SEXE': 'gender',\n 'INS_CLASSE': 'klass',\n 'ELE_CODE_CANTON': 'district',\n 'ELE_TEL_PRIVE': 'tel',\n 'ELE_TEL_MOBILE': 'mobile',\n 'ELE_EMAIL_RPN': 'email',\n 'ELE_COMPTE_RPN': 'login_rpn',\n }\n corporation_mapping = None\n # Those values are always taken from the import file\n fields_to_overwrite = ['klass', 'street', 'city','district', 'tel', 'mobile', 'email', 'login_rpn']\n klasses_to_skip = ['1CMS*'] # Abandon classes 1CMS ASE + 1CMS ASSC\n\n @property\n def _existing_students(self):\n return Student.objects.filter(\n archived=False,\n ext_id__isnull=False,\n klass__section__in=[s for s in Section.objects.all() if s.is_ESTER]\n )\n\n def update_defaults_from_candidate(self, defaults):\n pass\n\n\nclass HPImportView(ImportViewBase):\n \"\"\"\n Importation du fichier HyperPlanning pour l'établissement des feuilles\n de charges.\n \"\"\"\n form_class = UploadHPFileForm\n mapping = {\n 'NOMPERSO_ENS': 'teacher',\n 'LIBELLE_MAT': 'subject',\n 'NOMPERSO_DIP': 'public',\n 'TOTAL': 'period',\n }\n # Mapping between klass field and imputation\n account_categories = OrderedDict([\n ('ASAFE', 'ASAFE'),\n ('ASEFE', 'ASEFE'),\n ('ASSCFE', 'ASSCFE'),\n\n ('#Mandat_ASA', 'ASAFE'),\n\n ('MPTS', 'MPTS'),\n ('MPS', 'MPS'),\n ('CMS ASE', 'MPTS'),\n ('CMS ASSC', 'MPS'),\n\n ('EDEpe', 'EDEpe'),\n ('EDEps', 'EDEps'),\n ('EDS', 'EDS'),\n ('CAS_FPP', 'CAS_FPP'),\n\n # To split afterwards\n ('EDE', 'EDE'),\n ('#Mandat_ASE', 'ASE'),\n ('#Mandat_ASSC', 'ASSC'),\n ])\n\n def import_data(self, up_file):\n obj_created = obj_modified = 0\n errors = []\n\n # Pour accélérer la recherche\n profs = {str(t): t for t in Teacher.objects.all()}\n Course.objects.all().delete()\n\n for line in up_file:\n if (line['LIBELLE_MAT'] == '' or line['NOMPERSO_DIP'] == '' or line['TOTAL'] == ''):\n continue\n\n try:\n teacher = profs[line['NOMPERSO_ENS']]\n except KeyError:\n msg = \"Impossible de trouver «%s» dans la liste des enseignant-e-s\" % line['NOMPERSO_ENS']\n if msg not in errors:\n errors.append(msg)\n continue\n\n obj, created = Course.objects.get_or_create(\n teacher=teacher,\n subject=line['LIBELLE_MAT'],\n public=line['NOMPERSO_DIP'],\n )\n\n period = int(float(line['TOTAL'].replace(\"'\", \"\").replace('\\xa0', '')))\n if created:\n obj.period = period\n obj_created += 1\n for k, v in self.account_categories.items():\n if k in obj.public:\n obj.imputation = v\n break\n else:\n obj.period += 
period\n obj_modified += 1\n obj.save()\n\n if not obj.imputation:\n errors.append(\"Le cours {0} n'a pas pu être imputé correctement!\". format(str(obj)))\n\n return {'created': obj_created, 'modified': obj_modified, 'errors': errors}\n\n\nclass HPContactsImportView(ImportViewBase):\n \"\"\"\n Importation du fichier Hyperplanning contenant les formateurs d'étudiants.\n \"\"\"\n form_class = UploadHPFileForm\n\n def import_data(self, up_file):\n obj_modified = 0\n errors = []\n for idx, line in enumerate(up_file, start=2):\n try:\n student = Student.objects.get(ext_id=int(line['UID_ETU']))\n except Student.DoesNotExist:\n errors.append(\n \"Impossible de trouver l’étudiant avec le numéro %s\" % int(line['UID_ETU'])\n )\n continue\n if not line['NoSIRET']:\n errors.append(\n \"NoSIRET est vide à ligne %d. Ligne ignorée\" % idx\n )\n continue\n try:\n corp = Corporation.objects.get(ext_id=int(line['NoSIRET']))\n except Corporation.DoesNotExist:\n errors.append(\n \"Impossible de trouver l’institution avec le numéro %s\" % int(line['NoSIRET'])\n )\n continue\n\n # Check corporation matches\n if student.corporation_id != corp.pk:\n # This import has priority over the corporation set by StudentImportView\n student.corporation = corp\n student.save()\n\n contact = corp.corpcontact_set.filter(\n first_name__iexact=line['PRENOMMDS'].strip(),\n last_name__iexact=line['NOMMDS'].strip()\n ).first()\n if contact is None:\n contact = CorpContact.objects.create(\n corporation=corp, first_name=line['PRENOMMDS'].strip(),\n last_name=line['NOMMDS'].strip(), civility=line['CIVMDS'], email=line['EMAILMDS']\n )\n else:\n if line['CIVMDS'] and contact.civility != line['CIVMDS']:\n contact.civility = line['CIVMDS']\n contact.save()\n if line['EMAILMDS'] and contact.email != line['EMAILMDS']:\n contact.email = line['EMAILMDS']\n contact.save()\n if student.instructor != contact:\n student.instructor = contact\n student.save()\n obj_modified += 1\n return {'modified': obj_modified, 'errors': errors}\n\n\nclass ImportReportsView(FormView):\n template_name = 'file_import.html'\n form_class = UploadReportForm\n\n def dispatch(self, request, *args, **kwargs):\n self.klass = get_object_or_404(Klass, pk=kwargs['pk'])\n self.title = \"Importation d'un fichier PDF de moyennes pour la classe {}\".format(self.klass.name)\n return super().dispatch(request, *args, **kwargs)\n\n def form_valid(self, form):\n upfile = form.cleaned_data['upload']\n klass_name = upfile.name[:-4]\n redirect_url = reverse('class', args=[self.klass.pk])\n\n if self.klass.name != klass_name:\n messages.error(\n self.request,\n \"Le fichier téléchargé ne correspond pas à la classe {} !\".format(self.klass.name)\n )\n return HttpResponseRedirect(redirect_url)\n\n # Check poppler-utils presence on server\n res = call(['pdftotext', '-v'], stderr=PIPE)\n if res != 0:\n messages.error(\n self.request,\n \"Unable to find pdftotext on your system. 
Try to install the poppler-utils package.\"\n            )\n            return HttpResponseRedirect(redirect_url)\n\n        # Move the file to MEDIA directory\n        pdf_origin = os.path.join(settings.MEDIA_ROOT, upfile.name)\n        with open(pdf_origin, 'wb+') as destination:\n            for chunk in upfile.chunks():\n                destination.write(chunk)\n\n        try:\n            self.import_reports(pdf_origin, form.cleaned_data['semester'])\n        except Exception as err:\n            if settings.DEBUG:\n                raise\n            else:\n                messages.error(self.request, \"Erreur durant l'importation des bulletins PDF: %s\" % err)\n        return HttpResponseRedirect(redirect_url)\n\n    def import_reports(self, pdf_path, semester):\n        path = os.path.abspath(pdf_path)\n        student_regex = r'[E|É]lève\\s*:\\s*([^\\n]*)'\n        # Directory automatically deleted when the variable is deleted\n        _temp_dir = tempfile.TemporaryDirectory()\n        temp_dir = _temp_dir.name\n\n        os.system(\"pdfseparate %s %s/%s_%%d.pdf\" % (path, temp_dir, os.path.basename(path)[:-4]))\n\n        # Look for student names in each separated PDF and rename PDF with student name\n        pdf_count = 0\n        pdf_field = 'report_sem' + semester\n        for filename in os.listdir(temp_dir):\n            p = Popen(['pdftotext', os.path.join(temp_dir, filename), '-'],\n                      shell=False, stdout=PIPE, stderr=PIPE)\n            output, errs = p.communicate()\n            m = re.search(student_regex, output.decode('utf-8'))\n            if not m:\n                print(\"Unable to find student name in %s\" % filename)\n                continue\n            student_name = m.groups()[0]\n            # Find a student with the found student_name\n            try:\n                student = self.klass.student_set.exclude(archived=True\n                    ).annotate(fullname=Concat('last_name', Value(' '), 'first_name')).get(fullname=student_name)\n            except Student.DoesNotExist:\n                messages.warning(\n                    self.request,\n                    \"Impossible de trouver l'étudiant {} dans la classe {}\".format(student_name, self.klass.name)\n                )\n                continue\n            with open(os.path.join(temp_dir, filename), 'rb') as pdf:\n                getattr(student, pdf_field).save(filename, File(pdf), save=True)\n            student.save()\n            pdf_count += 1\n\n        messages.success(\n            self.request,\n            '{0} bulletins PDF ont été importés pour la classe {1} (sur {2} élèves)'.format(\n                pdf_count, self.klass.name,\n                self.klass.student_set.exclude(archived=True).count()\n            )\n        )\n", "sub_path": "stages/views/imports.py", "file_name": "imports.py", "file_ext": "py", "file_size_in_byte": 21837, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.views.generic.FormView", "line_number": 31, "usage_type": "name"}, {"api_name": "datetime.datetime.strptime", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "tabimport.CSVImportedFile", "line_number": 52, "usage_type": "call"}, {"api_name": "django.core.files.File", "line_number": 52, "usage_type": "call"}, {"api_name": "tabimport.FileFactory", "line_number": 54, "usage_type": "call"}, {"api_name": "django.db.transaction.atomic", "line_number": 55, "usage_type": "call"}, {"api_name": "django.db.transaction", "line_number": 55, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 58, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 58, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 63, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages", 
"line_number": 67, "usage_type": "name"}, {"api_name": "django.contrib.messages.info", "line_number": 69, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 69, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 71, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 71, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 72, "usage_type": "call"}, {"api_name": "django.urls.reverse", "line_number": 72, "usage_type": "call"}, {"api_name": "forms.StudentImportForm", "line_number": 77, "usage_type": "name"}, {"api_name": "utils.is_int", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Klass.objects.get", "line_number": 138, "usage_type": "call"}, {"api_name": "models.Klass.objects", "line_number": 138, "usage_type": "attribute"}, {"api_name": "models.Klass", "line_number": 138, "usage_type": "name"}, {"api_name": "models.Klass.DoesNotExist", "line_number": 139, "usage_type": "attribute"}, {"api_name": "models.Klass", "line_number": 139, "usage_type": "name"}, {"api_name": "models.Option.objects.get", "line_number": 146, "usage_type": "call"}, {"api_name": "models.Option.objects", "line_number": 146, "usage_type": "attribute"}, {"api_name": "models.Option", "line_number": 146, "usage_type": "name"}, {"api_name": "models.Option.DoesNotExist", "line_number": 147, "usage_type": "attribute"}, {"api_name": "models.Option", "line_number": 147, "usage_type": "name"}, {"api_name": "models.Student.objects.filter", "line_number": 155, "usage_type": "call"}, {"api_name": "models.Student.objects", "line_number": 155, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 155, "usage_type": "name"}, {"api_name": "models.Section.objects.all", "line_number": 158, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 158, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 158, "usage_type": "name"}, {"api_name": "candidats.models.Candidate.objects.get", "line_number": 163, "usage_type": "call"}, {"api_name": "candidats.models.Candidate.objects", "line_number": 163, "usage_type": "attribute"}, {"api_name": "candidats.models.Candidate", "line_number": 163, "usage_type": "name"}, {"api_name": "models.Option.objects.get", "line_number": 167, "usage_type": "call"}, {"api_name": "models.Option.objects", "line_number": 167, "usage_type": "attribute"}, {"api_name": "models.Option", "line_number": 167, "usage_type": "name"}, {"api_name": "models.Teacher.objects.all", "line_number": 187, "usage_type": "call"}, {"api_name": "models.Teacher.objects", "line_number": 187, "usage_type": "attribute"}, {"api_name": "models.Teacher", "line_number": 187, "usage_type": "name"}, {"api_name": "fnmatch.fnmatch", "line_number": 197, "usage_type": "call"}, {"api_name": "models.Student.objects.get", "line_number": 231, "usage_type": "call"}, {"api_name": "models.Student.objects", "line_number": 231, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 231, "usage_type": "name"}, {"api_name": "models.Student.DoesNotExist", "line_number": 243, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 243, "usage_type": "name"}, {"api_name": "candidats.models.Candidate.DoesNotExist", "line_number": 246, "usage_type": "attribute"}, {"api_name": "candidats.models.Candidate", "line_number": 246, "usage_type": "name"}, {"api_name": "models.Student.objects.create", "line_number": 255, "usage_type": "call"}, 
{"api_name": "models.Student.objects", "line_number": 255, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 255, "usage_type": "name"}, {"api_name": "models.Student.objects.get", "line_number": 262, "usage_type": "call"}, {"api_name": "models.Student.objects", "line_number": 262, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 262, "usage_type": "name"}, {"api_name": "utils.is_int", "line_number": 274, "usage_type": "call"}, {"api_name": "models.Corporation.objects.get_or_create", "line_number": 277, "usage_type": "call"}, {"api_name": "models.Corporation.objects", "line_number": 277, "usage_type": "attribute"}, {"api_name": "models.Corporation", "line_number": 277, "usage_type": "name"}, {"api_name": "django.db.IntegrityError", "line_number": 281, "usage_type": "name"}, {"api_name": "models.Corporation.objects.get", "line_number": 285, "usage_type": "call"}, {"api_name": "models.Corporation.objects", "line_number": 285, "usage_type": "attribute"}, {"api_name": "models.Corporation", "line_number": 285, "usage_type": "name"}, {"api_name": "models.Corporation.DoesNotExist", "line_number": 290, "usage_type": "attribute"}, {"api_name": "models.Corporation", "line_number": 290, "usage_type": "name"}, {"api_name": "models.Corporation.MultipleObjectsReturned", "line_number": 292, "usage_type": "attribute"}, {"api_name": "models.Corporation", "line_number": 292, "usage_type": "name"}, {"api_name": "models.Student.objects.filter", "line_number": 326, "usage_type": "call"}, {"api_name": "models.Student.objects", "line_number": 326, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 326, "usage_type": "name"}, {"api_name": "models.Section.objects.all", "line_number": 329, "usage_type": "call"}, {"api_name": "models.Section.objects", "line_number": 329, "usage_type": "attribute"}, {"api_name": "models.Section", "line_number": 329, "usage_type": "name"}, {"api_name": "forms.UploadHPFileForm", "line_number": 341, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 349, "usage_type": "call"}, {"api_name": "models.Teacher.objects.all", "line_number": 377, "usage_type": "call"}, {"api_name": "models.Teacher.objects", "line_number": 377, "usage_type": "attribute"}, {"api_name": "models.Teacher", "line_number": 377, "usage_type": "name"}, {"api_name": "models.Course.objects.all", "line_number": 378, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 378, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 378, "usage_type": "name"}, {"api_name": "models.Course.objects.get_or_create", "line_number": 392, "usage_type": "call"}, {"api_name": "models.Course.objects", "line_number": 392, "usage_type": "attribute"}, {"api_name": "models.Course", "line_number": 392, "usage_type": "name"}, {"api_name": "forms.UploadHPFileForm", "line_number": 421, "usage_type": "name"}, {"api_name": "models.Student.objects.get", "line_number": 428, "usage_type": "call"}, {"api_name": "models.Student.objects", "line_number": 428, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 428, "usage_type": "name"}, {"api_name": "models.Student.DoesNotExist", "line_number": 429, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 429, "usage_type": "name"}, {"api_name": "models.Corporation.objects.get", "line_number": 440, "usage_type": "call"}, {"api_name": "models.Corporation.objects", "line_number": 440, "usage_type": "attribute"}, {"api_name": 
"models.Corporation", "line_number": 440, "usage_type": "name"}, {"api_name": "models.Corporation.DoesNotExist", "line_number": 441, "usage_type": "attribute"}, {"api_name": "models.Corporation", "line_number": 441, "usage_type": "name"}, {"api_name": "models.CorpContact.objects.create", "line_number": 458, "usage_type": "call"}, {"api_name": "models.CorpContact.objects", "line_number": 458, "usage_type": "attribute"}, {"api_name": "models.CorpContact", "line_number": 458, "usage_type": "name"}, {"api_name": "django.views.generic.FormView", "line_number": 476, "usage_type": "name"}, {"api_name": "forms.UploadReportForm", "line_number": 478, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 481, "usage_type": "call"}, {"api_name": "models.Klass", "line_number": 481, "usage_type": "argument"}, {"api_name": "django.urls.reverse", "line_number": 488, "usage_type": "call"}, {"api_name": "django.contrib.messages.error", "line_number": 491, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 491, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 495, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 498, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 498, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 500, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 500, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 504, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 507, "usage_type": "call"}, {"api_name": "os.path", "line_number": 507, "usage_type": "attribute"}, {"api_name": "django.conf.settings.MEDIA_ROOT", "line_number": 507, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 507, "usage_type": "name"}, {"api_name": "django.conf.settings.DEBUG", "line_number": 516, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 516, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 519, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 519, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 520, "usage_type": "call"}, {"api_name": "os.path.abspath", "line_number": 523, "usage_type": "call"}, {"api_name": "os.path", "line_number": 523, "usage_type": "attribute"}, {"api_name": "tempfile.TemporaryDirectory", "line_number": 526, "usage_type": "call"}, {"api_name": "os.system", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 529, "usage_type": "call"}, {"api_name": "os.path", "line_number": 529, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 534, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 535, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 535, "usage_type": "call"}, {"api_name": "os.path", "line_number": 535, "usage_type": "attribute"}, {"api_name": "subprocess.PIPE", "line_number": 536, "usage_type": "name"}, {"api_name": "re.search", "line_number": 538, "usage_type": "call"}, {"api_name": "django.db.models.functions.Concat", "line_number": 546, "usage_type": "call"}, {"api_name": "django.db.models.Value", "line_number": 546, "usage_type": "call"}, {"api_name": "models.Student.DoesNotExist", "line_number": 547, "usage_type": "attribute"}, {"api_name": "models.Student", "line_number": 
547, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 548, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 548, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 553, "usage_type": "call"}, {"api_name": "os.path", "line_number": 553, "usage_type": "attribute"}, {"api_name": "django.core.files.File", "line_number": 554, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 558, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 558, "usage_type": "name"}]} +{"seq_id": "284041115", "text": "import argparse, os, copy, torch\r\nfrom cv2 import aruco\r\nimport cv2, sys, time, traceback\r\nimport numpy as np\r\n\r\n\r\nfrom models.experimental import attempt_load\r\nfrom utils.general import non_max_suppression, plot_images, output_to_target\r\n\r\nfrom RobotRaconteur.Client import *\r\n\r\n\r\nclass create_impl(object):\r\n def __init__(self):\r\n #initialize detection parameters\r\n self.BACKGROUND=150\r\n self.RESOLUTION=1\r\n raw_image_width=1280\r\n raw_image_height=720\r\n self.detection_start_r=round(.15*raw_image_height)\r\n self.detection_end_r=round(.75*raw_image_height)\r\n self.detection_start_c=round(.23*raw_image_width)\r\n self.detection_end_c=round(.48*raw_image_width)\r\n self.offset=np.zeros((4,3))\r\n self.offset[:,0]=self.detection_start_c\r\n self.offset[:,1]=self.detection_start_r\r\n self.device = torch.device('cuda')\r\n self.detection_width=192\r\n self.detection_height=256\r\n\r\n #transfer to cognex frame\r\n self.H=np.array([[ 0.99995151, -0.00984728, 0.08885063],\r\n [ 0.00984728, 0.99995151, -0.03708229],\r\n [ 0., 0., 1. ]])\r\n self.f=600\r\n self.center_c=640\r\n self.center_r=360\r\n\r\n # device = torch.device('cpu')\r\n # Load model\r\n self.model = attempt_load(\"my_test/kinect_weight.pt\",map_location=self.device) # load FP32 model\r\n self.names = {k: v for k, v in enumerate(self.model.names if hasattr(self.model, 'names') else self.model.module.names)}\r\n\r\n # Configure\r\n self.model.eval()\r\n\r\n #image pipe setting\r\n self.current_frame=None\r\n\r\n #auto discovery\r\n time.sleep(2)\r\n res=RRN.FindServiceByType(\"experimental.createwebcam2.WebcamHost\",\r\n [\"rr+local\",\"rr+tcp\",\"rrs+tcp\"])\r\n url=None\r\n for serviceinfo2 in res:\r\n if \"Webcam\" in serviceinfo2.NodeName:\r\n url=serviceinfo2.ConnectionURL\r\n break\r\n if url==None:\r\n print('service not found')\r\n sys.exit()\r\n\r\n #Startup, connect, and pull out the camera from the objref \r\n c_host=RRN.ConnectService(url)\r\n\r\n self.c=c_host.get_Webcams(0)\r\n\r\n #Connect the pipe FrameStream to get the PipeEndpoint p\r\n self.p=self.c.FrameStream.Connect(-1)\r\n\r\n #Set the callback for when a new pipe packet is received to the\r\n #new_frame function\r\n self.p.PacketReceivedEvent+=self.new_frame\r\n try:\r\n self.c.StartStreaming()\r\n except: pass\r\n #timing\r\n self.now=time.time()\r\n\r\n #orientation checking param\r\n gt_soap=cv2.cvtColor(cv2.imread(\"orientation_check/soap.jpg\"),cv2.COLOR_BGR2GRAY)\r\n gt_toothpaste=cv2.cvtColor(cv2.imread(\"orientation_check/toothpaste.jpg\"),cv2.COLOR_BGR2GRAY)\r\n gt_perfume=cv2.cvtColor(cv2.imread(\"orientation_check/perfume.jpg\"),cv2.COLOR_BGR2GRAY)\r\n self.max_size=38\r\n gt_soap_tensor=np.zeros((int(360/self.RESOLUTION),self.max_size,self.max_size))\r\n gt_toothpaste_tensor=np.zeros((int(360/self.RESOLUTION),self.max_size,self.max_size))\r\n 
gt_perfume_tensor=np.zeros((int(360/self.RESOLUTION),self.max_size,self.max_size))\r\n\r\n for i in range(0,360,self.RESOLUTION):\r\n gt_soap_tensor[int(i/self.RESOLUTION)]=self.square(self.rotate_image(gt_soap,i),self.max_size)\r\n gt_toothpaste_tensor[int(i/self.RESOLUTION)]=self.square(self.rotate_image(gt_toothpaste,i),self.max_size)\r\n gt_perfume_tensor[int(i/self.RESOLUTION)]=self.square(self.rotate_image(gt_perfume,i),self.max_size)\r\n\r\n self.gt_dict={0:gt_toothpaste_tensor,1:gt_perfume_tensor,2:gt_soap_tensor}\r\n \r\n #initialize objrecog structures\r\n self._object_recognition_sensor_data=None\r\n self.object_recognition_sensor_data_data=RRN.NewStructure(\"edu.robotraconteur.objectrecognition.ObjectRecognitionSensorData\")\r\n self.object_recognition_sensor_data_data.recognized_objects=RRN.NewStructure(\"edu.robotraconteur.objectrecognition.RecognizedObjects\") #recognized_objects\r\n\r\n self.recognized_object=RRN.NewStructure(\"edu.robotraconteur.objectrecognition.RecognizedObject\")\r\n self.recognized_object.recognized_object=RRN.NewStructure(\"com.robotraconteur.identifier.Identifier\") #name,uuid\r\n uuid_dtype=RRN.GetNamedArrayDType(\"com.robotraconteur.uuid.UUID\")\r\n self.recognized_object.recognized_object.uuid=np.zeros((1,),dtype=uuid_dtype)\r\n self.recognized_object.pose=RRN.NewStructure(\"com.robotraconteur.geometry.NamedPoseWithCovariance\")\r\n self.recognized_object.pose.pose=RRN.NewStructure(\"com.robotraconteur.geometry.NamedPose\")\r\n pose_dtype=RRN.GetNamedArrayDType(\"com.robotraconteur.geometry.Pose\")\r\n self.recognized_object.pose.pose.pose=np.zeros((1,),dtype=pose_dtype)\r\n\r\n #initialize detection obj map\r\n self.detection_objects={}\r\n self.detection_obj=RRN.NewStructure(\"edu.robotraconteur.objectrecognition.detection_obj\")\r\n self.models=['tp','pf','sp','bt']\r\n for name in self.models:\r\n self.detection_objects[name]=copy.deepcopy(self.detection_obj)\r\n self.detection_objects[name].name=name\r\n\r\n \r\n #object_recognition_sensor_data pipe member property getter and setter\r\n @property\r\n def object_recognition_sensor_data(self):\r\n return self._object_recognition_sensor_data\r\n @object_recognition_sensor_data.setter\r\n def object_recognition_sensor_data(self,data):\r\n self._object_recognition_sensor_data=data\r\n #Create the PipeBroadcaster and set backlog to 3 so packets\r\n #will be dropped if the transport is overloaded\r\n self._object_recognition_sensor_data_broadcaster=RR.PipeBroadcaster(data,1)\r\n\r\n #Function to take the data structure returned from the Webcam service\r\n #and convert it to an OpenCV array\r\n def WebcamImageToMat(self,image):\r\n frame2=image.data.reshape([image.height, image.width, 3], order='C')\r\n return frame2\r\n\r\n\r\n #This function is called when a new pipe packet arrives\r\n def new_frame(self,pipe_ep):\r\n #Loop to get the newest frame\r\n while (pipe_ep.Available > 0):\r\n try:\r\n #Receive the packet\r\n image=pipe_ep.ReceivePacket()\r\n #Convert the packet to an image and set the global variable\r\n tmp_frame=self.WebcamImageToMat(image)\r\n # tmp_frame=self.aruco_process(tmp_frame)\r\n detection_frame=self.test(tmp_frame[self.detection_start_r:self.detection_end_r,self.detection_start_c:self.detection_end_c,:])\r\n tmp_frame[self.detection_start_r:self.detection_end_r,self.detection_start_c:self.detection_end_c,:]=cv2.resize(detection_frame,(-self.detection_start_c+self.detection_end_c,-self.detection_start_r+self.detection_end_r))\r\n self.current_frame=tmp_frame\r\n # 
print(time.time()-self.now)\r\n self.now=time.time()\r\n except:\r\n traceback.print_exc()\r\n def aruco_process(self,frame):\r\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)\r\n parameters = aruco.DetectorParameters_create()\r\n # parameters.minMarkerPerimeterRate=0.00001\r\n # parameters.adaptiveThreshConstant=20\r\n # # parameters.minMarkerDistanceRate=0.005\r\n # parameters.adaptiveThreshWinSizeMin=5\r\n # parameters.adaptiveThreshWinSizeMax=10\r\n # parameters.adaptiveThreshWinSizeStep=1\r\n\r\n corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)\r\n frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)\r\n return frame_markers\r\n def square(self,im,desired_size):\r\n image=self.BACKGROUND*np.ones((desired_size, desired_size),np.uint8)\r\n shape_diff=[desired_size-im.shape[0],desired_size-im.shape[1]]\r\n start=[round(shape_diff[0]/2),round(shape_diff[1]/2)]\r\n end=[desired_size-round(shape_diff[0]/2),desired_size-round(shape_diff[1]/2)]\r\n image[start[0]:end[0],start[1]:end[1]]=cv2.resize(im,(end[1]-start[1],end[0]-start[0]))\r\n\r\n return image\r\n def rotate_image(self,mat, angle):\r\n \"\"\"\r\n Rotates an image (angle in degrees) and expands image to avoid cropping\r\n \"\"\"\r\n\r\n height, width = mat.shape[:2] # image shape has 3 dimensions\r\n\r\n image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape\r\n\r\n rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)\r\n\r\n # rotation calculates the cos and sin, taking absolutes of those.\r\n abs_cos = abs(rotation_mat[0,0]) \r\n abs_sin = abs(rotation_mat[0,1])\r\n\r\n # find the new width and height bounds\r\n bound_w = int(height * abs_sin + width * abs_cos)\r\n bound_h = int(height * abs_cos + width * abs_sin)\r\n\r\n # subtract old image center (bringing image back to the origin) and add the new image center coordinates\r\n rotation_mat[0, 2] += bound_w/2 - image_center[0]\r\n rotation_mat[1, 2] += bound_h/2 - image_center[1]\r\n\r\n # rotate image with the new bounds and translated rotation matrix\r\n rotated_mat = cv2.warpAffine(mat, rotation_mat, (bound_w, bound_h),borderMode=cv2.BORDER_CONSTANT,borderValue=self.BACKGROUND)\r\n return rotated_mat\r\n\r\n\r\n def orientation(self,gt_tensor,bound_img):\r\n return self.RESOLUTION*np.argmin(np.linalg.norm(gt_tensor-bound_img,axis=(1,2)))\r\n\r\n\r\n def test(self,img_raw): # number of logged images\r\n\r\n \r\n #found objects\r\n found=np.zeros((4,3))\r\n\r\n try:\r\n img_resize=cv2.resize(img_raw,(self.detection_width,self.detection_height))\r\n img=cv2.cvtColor(img_resize, cv2.COLOR_BGR2RGB)\r\n img=img.transpose(2,0,1)\r\n img=torch.tensor(img.reshape((1,3,self.detection_height,self.detection_width))).float().to(self.device)\r\n except:\r\n traceback.print_exc()\r\n\r\n img /= 255.0 # 0 - 255 to 0.0 - 1.0\r\n nb, _, height, width = img.shape # batch size, channels, height, width\r\n\r\n # Run model\r\n inf_out, train_out = self.model(img) # inference and training outputs\r\n\r\n # Run NMS\r\n output = non_max_suppression(inf_out, conf_thres=0.8)\r\n\r\n if output[0] is None:\r\n return img_raw\r\n out=output[0].cpu().numpy()\r\n # sort by the last column (class id)\r\n out=out[out[:,-1].argsort()]\r\n out_filter=[]\r\n\r\n \r\n for i in range(len(out)):\r\n obj=out[i]\r\n #filter out extremely small detections\r\n # if abs(obj[3]-obj[1])<2 or abs(obj[2]-obj[0])<2 or obj[0]<0 or obj[1]<0 or obj[2]<0 or 
obj[3]<0:\r\n # out_filter.append(i)\r\n # continue\r\n #filter out obj not in region\r\n if obj[-1]==0 and (obj[0]<.45*width or obj[1]>0.3*height):\r\n out_filter.append(i)\r\n continue\r\n if obj[-1]==1 and (obj[0]>.45*width or obj[1]>0.3*height):\r\n out_filter.append(i)\r\n continue\r\n if obj[-1]==2 and (obj[0]<.5*width or obj[1]<0.6*height):\r\n out_filter.append(i)\r\n continue\r\n if obj[-1]==3 and (obj[0]>.5*width or obj[1]<0.6*height):\r\n out_filter.append(i)\r\n continue\r\n #if object already found\r\n if found[int(obj[-1])][0]:\r\n continue\r\n #check orientation except for bottle\r\n if obj[-1]!=3:\r\n img_crop=img_resize[int(obj[1]):int(obj[3]),int(obj[0]):int(obj[2]),:]\r\n try:\r\n angle=self.orientation(self.gt_dict[obj[-1]],self.square(cv2.cvtColor(img_crop,cv2.COLOR_BGR2GRAY),self.max_size))-90.\r\n except:\r\n continue\r\n else:\r\n angle=0\r\n\r\n found[int(obj[-1])]=np.array([(obj[0]+obj[2])/2,(obj[1]+obj[3])/2,angle])\r\n\r\n out=np.delete(out,out_filter,axis=0)\r\n\r\n img_out=cv2.cvtColor(plot_images(img, output_to_target([torch.tensor(out)], width, height),fname=None, names=self.names), cv2.COLOR_RGB2BGR)\r\n for i in range(4):\r\n \r\n if found[i][0]:\r\n\r\n img_out= cv2.putText(img_out, str(int(found[i][-1])), (int(found[i][0]),int(found[i][1])),fontFace=cv2.FONT_HERSHEY_COMPLEX_SMALL,fontScale=.5,color=(0,0,0))\r\n #my type\r\n kinect_cood_c=((found[i][0]*(self.detection_end_c-self.detection_start_c)/self.detection_width+self.detection_start_c)-self.center_c)/self.f\r\n kinect_cood_r=-((found[i][1]*(self.detection_end_r-self.detection_start_r)/self.detection_height+self.detection_start_r)-self.center_r)/self.f\r\n trans=np.dot(self.H,np.array([[kinect_cood_c],[kinect_cood_r],[1]]))\r\n list(self.detection_objects.values())[i].x = trans[0][0]\r\n list(self.detection_objects.values())[i].y = trans[1][0]\r\n list(self.detection_objects.values())[i].angle=found[i][-1]\r\n list(self.detection_objects.values())[i].detected=True\r\n else:\r\n list(self.detection_objects.values())[i].detected=False\r\n\r\n found[:,0]*=(self.detection_end_c-self.detection_start_c)/self.detection_width\r\n found[:,1]*=(self.detection_end_r-self.detection_start_r)/self.detection_height\r\n found+=self.offset \r\n\r\n\r\n #pass to RR wire\r\n self.detection_wire.OutValue=self.detection_objects \r\n\r\n return img_out\r\n\r\n\r\ndef main():\r\n #Parse the command line options\r\n parser = argparse.ArgumentParser(description=\"kinect object detection service\")\r\n parser.add_argument(\"--view\",action='store_true',default=False)\r\n args, _ = parser.parse_known_args()\r\n with RR.ServerNodeSetup(\"detection_Service\", 52222) as node_setup:\r\n\r\n cwd = os.getcwd()\r\n os.chdir('/home/rpi/catkin_ws/src/robotraconteur_companion/robdef/group1')\r\n RRN.RegisterServiceTypesFromFiles(['edu.robotraconteur.objectrecognition.robdef'],True) \r\n os.chdir(cwd)\r\n detection_inst=create_impl()\r\n try:\r\n RRN.RegisterService(\"detection\", \"edu.robotraconteur.objectrecognition.ObjectRecognitionSensor\", detection_inst)\r\n except:\r\n traceback.print_exc()\r\n \r\n if not args.view:\r\n input(\"press enter to quit\")\r\n while args.view:\r\n #Just loop resetting the frame\r\n #This is not ideal but good enough for demonstration\r\n\r\n if detection_inst.current_frame is not None:\r\n cv2.imshow(\"Detection Image\",detection_inst.current_frame)\r\n if cv2.waitKey(50)!=-1:\r\n break\r\n cv2.destroyAllWindows()\r\n\r\n detection_inst.p.Close()\r\n 
detection_inst.c.StopStreaming()\r\n\r\nif __name__ == '__main__':\r\n \r\n main()\r\n\r\n", "sub_path": "yolo_detection/kinect_detection.py", "file_name": "kinect_detection.py", "file_ext": "py", "file_size_in_byte": 15247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.zeros", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 32, "usage_type": "call"}, {"api_name": "models.experimental.attempt_load", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 51, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 61, "usage_type": "call"}, {"api_name": "time.time", "line_number": 78, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 81, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 81, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 82, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 82, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 83, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 87, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 108, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 115, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 148, "usage_type": "call"}, {"api_name": "time.time", "line_number": 151, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 155, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 155, "usage_type": "attribute"}, {"api_name": "cv2.aruco.Dictionary_get", "line_number": 156, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 156, "usage_type": "name"}, {"api_name": "cv2.aruco.DICT_6X6_250", "line_number": 156, "usage_type": "attribute"}, {"api_name": "cv2.aruco.DetectorParameters_create", "line_number": 157, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 157, "usage_type": "name"}, {"api_name": "cv2.aruco.detectMarkers", "line_number": 165, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 165, "usage_type": "name"}, {"api_name": "cv2.aruco.drawDetectedMarkers", "line_number": 166, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 166, "usage_type": "name"}, {"api_name": "numpy.ones", "line_number": 169, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 169, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 173, "usage_type": "call"}, {"api_name": "cv2.getRotationMatrix2D", "line_number": 185, "usage_type": "call"}, {"api_name": "cv2.warpAffine", "line_number": 200, "usage_type": "call"}, {"api_name": "cv2.BORDER_CONSTANT", "line_number": 200, "usage_type": "attribute"}, {"api_name": "numpy.argmin", "line_number": 205, "usage_type": "call"}, 
{"api_name": "numpy.linalg.norm", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 205, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 212, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 215, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 216, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 216, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 218, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 220, "usage_type": "call"}, {"api_name": "utils.general.non_max_suppression", "line_number": 229, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 265, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 265, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 271, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 273, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 275, "usage_type": "call"}, {"api_name": "utils.general.plot_images", "line_number": 275, "usage_type": "call"}, {"api_name": "utils.general.output_to_target", "line_number": 275, "usage_type": "call"}, {"api_name": "torch.tensor", "line_number": 275, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 275, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 280, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX_SMALL", "line_number": 280, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 284, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 305, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 310, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 311, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 313, "usage_type": "call"}, {"api_name": "traceback.print_exc", "line_number": 318, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 327, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 328, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 330, "usage_type": "call"}]} +{"seq_id": "293412700", "text": "\nimport _setup_test_env # noqa\nimport sys\nimport unittest\nimport typing\nimport numpy\nfrom pytorch_to_returnn import torch\nfrom pytorch_to_returnn.converter import verify_torch_and_convert_to_returnn\n\n\ndef test_mnist():\n def model_func(wrapped_import, inputs):\n if wrapped_import:\n torch = wrapped_import(\"torch\")\n nn = wrapped_import(\"torch.nn\")\n F = wrapped_import(\"torch.nn.functional\")\n else:\n import torch\n import torch.nn as nn\n import torch.nn.functional as F\n\n # directly from here: https://github.com/pytorch/examples/blob/master/mnist/main.py\n class Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.conv1 = nn.Conv2d(1, 32, 3, 1)\n self.conv2 = nn.Conv2d(32, 64, 3, 1)\n self.dropout1 = nn.Dropout(0.25)\n self.dropout2 = nn.Dropout(0.5)\n self.fc1 = nn.Linear(9216, 128)\n self.fc2 = nn.Linear(128, 10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = F.relu(x)\n x = F.max_pool2d(x, 2)\n x = self.dropout1(x)\n x = torch.flatten(x, 1)\n x = self.fc1(x)\n x = F.relu(x)\n x = self.dropout2(x)\n x = self.fc2(x)\n output = F.log_softmax(x, dim=1)\n return output\n\n net = Net()\n net = net.eval() # disable dropout\n return 
net(inputs)\n\n rnd = numpy.random.RandomState(42)\n N, C, H, W = 64, 1, 28, 28\n x = rnd.normal(0., 1., (N, C, H, W)).astype(\"float32\")\n verify_torch_and_convert_to_returnn(\n model_func, inputs=x, inputs_data_kwargs={\"shape\": (C, H, W)})\n\n\ndef test_custom_layer_norm():\n N, F, T = 64, 11, 28\n\n def model_func(wrapped_import, inputs):\n if wrapped_import:\n torch = wrapped_import(\"torch\")\n else:\n import torch\n\n class LayerNorm(torch.nn.LayerNorm):\n def __init__(self, nout, dim=-1):\n \"\"\"Construct an LayerNorm object.\"\"\"\n super(LayerNorm, self).__init__(nout, eps=1e-12)\n self.dim = dim\n\n def forward(self, x):\n if self.dim == -1:\n return super(LayerNorm, self).forward(x)\n return super(LayerNorm, self).forward(x.transpose(1, -1)).transpose(1, -1)\n\n class Net(torch.nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.norm = LayerNorm(nout=F, dim=1)\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n return self.norm(x)\n\n net = Net()\n return net(inputs)\n\n rnd = numpy.random.RandomState(42)\n x = rnd.normal(0., 1., (N, F, T)).astype(\"float32\")\n verify_torch_and_convert_to_returnn(model_func, inputs=x)\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1:\n for k, v in sorted(globals().items()):\n if k.startswith(\"test_\"):\n print(\"-\" * 40)\n print(\"Executing: %s\" % k)\n try:\n v()\n except unittest.SkipTest as exc:\n print(\"SkipTest:\", exc)\n print(\"-\" * 40)\n print(\"Finished all tests.\")\n else:\n assert len(sys.argv) >= 2\n for arg in sys.argv[1:]:\n print(\"Executing: %s\" % arg)\n if arg in globals():\n globals()[arg]() # assume function and execute\n else:\n eval(arg) # assume Python code and execute\n", "sub_path": "tests/test_converter.py", "file_name": "test_converter.py", "file_ext": "py", "file_size_in_byte": 3218, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pytorch_to_returnn.torch", "line_number": 14, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 23, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 23, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 27, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 29, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 29, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 31, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 31, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 35, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 35, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.functional.max_pool2d", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 38, "usage_type": "name"}, {"api_name": "torch.flatten", "line_number": 40, 
"usage_type": "call"}, {"api_name": "torch.nn.functional.relu", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 42, "usage_type": "name"}, {"api_name": "torch.nn.functional.log_softmax", "line_number": 45, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.random.RandomState", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 52, "usage_type": "attribute"}, {"api_name": "pytorch_to_returnn.converter.verify_torch_and_convert_to_returnn", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn", "line_number": 68, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 79, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 82, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 84, "usage_type": "attribute"}, {"api_name": "numpy.random.RandomState", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 90, "usage_type": "attribute"}, {"api_name": "torch.nn.functional", "line_number": 91, "usage_type": "name"}, {"api_name": "pytorch_to_returnn.converter.verify_torch_and_convert_to_returnn", "line_number": 92, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 96, "usage_type": "attribute"}, {"api_name": "unittest.SkipTest", "line_number": 103, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 108, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 109, "usage_type": "attribute"}]} +{"seq_id": "401039355", "text": "import string\nfrom itertools import islice\nimport random\nimport numpy as np\nimport scipy.spatial.distance as dist\n\n\nclass ClusterLabel(object):\n def __init__(self, l_matrix_path, l_label):\n self.l_matrix_path = l_matrix_path\n self.l_data = {}\n self.l_label = l_label\n self.label_dic = {}\n\n def read_matrix(self):\n \"\"\"\n Read input gene profile matrix\n \"\"\"\n with open(self.l_matrix_path) as f:\n for i in islice(f, 1, None):\n node = i.strip().split('\\t')\n self.l_data[node[0]] = list(map(int, node[2:]))\n\n def read_label(self):\n \"\"\"\n Read input gene information\n :rtype: dict\n \"\"\"\n with open(self.l_label) as f:\n for i in f:\n l = i.strip().split('\\t')\n self.label_dic[l[1]] = l[0]\n return self.label_dic\n\n def choice(self, n):\n \"\"\"\n Choice the k value with label for k means\n :rtype: dict\n \"\"\"\n gene_id = list(self.label_dic.keys())\n # get random start\n choice_gene = random.sample(gene_id, n)\n index_label = random.sample(string.ascii_uppercase, n)\n\n choice_gene_label = dict(zip(choice_gene, index_label))\n\n return choice_gene_label\n\n def trans(self, k_dict):\n \"\"\"\n Transform k dict to matrix value\n :param k_dict: k label example:{'11648': 'M', '11632': 'Y', '11652': 'N'}\n :return:\n \"\"\"\n r_k_dic = {v: k for k, v in k_dict.items()}\n k_dic_trans = {}\n for k, v in r_k_dic.items():\n k_dic_trans[k] = self.l_data[v]\n return k_dic_trans\n\n def nearest_distance(self, k_dict_trans, v_array):\n \"\"\"\n Calculate every label to k distance\n :param k_dict_trans: k dict trans to matrix\n :param v_array: a array example: array([[1, 1, 1, 1, 1, 1,]])\n :return: label\n \"\"\"\n k_dic = list(k_dict_trans.keys())\n k_list = []\n for i in k_dic:\n k_list.append(k_dict_trans[i])\n k_array = np.array(k_list)\n\n distance = dist.cdist(k_array, v_array, 
metric='euclidean')\n # Get close distance\n min_d = 10000\n for ind, value in enumerate(distance):\n\n if value < min_d:\n min_d = value\n min_index = ind\n return k_dic[min_index]\n\n def min_dist(self, k_array, c_array):\n distance = dist.cdist(k_array, c_array, metric='euclidean')\n # Get close distance\n min_d = 10000\n for ind, value in enumerate(distance):\n\n if value < min_d:\n min_d = value\n min_index = ind\n return min_index, min_d\n\n def kpp(self, k):\n \"\"\"\n The method k-means++ for choice k\n :param k: The k cluster\n :return: The initial k center\n \"\"\"\n # initialize data\n cluster_center = {}\n cluster_center_l = []\n\n # get gene id and choice by random as center gene\n gene_id = list(self.l_data.keys())\n random.seed(0)\n cluster_center[0] = random.choice(gene_id)\n print(cluster_center)\n # cluster cluster list\n for v in cluster_center.values():\n cluster_center_l.append(self.l_data[v])\n\n # do k-means++ algorithm\n for i in range(1, k):\n d = []\n for j, m in enumerate(self.l_data):\n k_array = np.array([self.l_data[m]])\n cluster_center_array = np.array(cluster_center_l)\n # get min distance\n min_d = self.min_dist(cluster_center_array, k_array)[1]\n d.append(min_d)\n # sum distance\n sum_d = sum(d)\n # random\n sum_d *= random.random()\n\n # get center\n for ind, dis in enumerate(d):\n sum_d -= dis\n if sum_d > 0:\n continue\n cluster_center[i] = gene_id[ind]\n break\n\n # add different label\n index_label = random.sample(string.ascii_uppercase, k)\n choice_gene_label = dict(zip(cluster_center.values(), index_label))\n print(choice_gene_label)\n return choice_gene_label\n\n def centre_label(self, k_dict, k_dict_trans):\n \"\"\"\n Get centre label matrix and the cluster\n :param k_dict: Input the random start k dictionary example: {'S': '11636', 'J': '11602', 'L': '11650'}\n :param k_dict_trans: The transform data from k dictionary example:{'S': [1.0, 1.0, 1.0, 1.0, 1.0, ..}\n :return: The cluster centre and the cluster members\n \"\"\"\n c_l_matrix = {}\n # reversal dic\n r_k_dic = {v: [] for v in k_dict.values()}\n # cluster label 1 iteration\n for k, v in self.l_data.items():\n l_label = self.nearest_distance(k_dict_trans, np.array([v]))\n if k not in r_k_dic[l_label]:\n r_k_dic[l_label].append(k)\n\n # calculate centre\n for k, v in r_k_dic.items():\n each_matrix = []\n for l in v:\n each_matrix.append(self.l_data[l])\n each_array = np.array(each_matrix)\n c_l_matrix[k] = list(each_array.mean(0))\n return c_l_matrix, r_k_dic\n\n def run_k_means(self, k):\n\n \"\"\"\n Start k means\n :param k: K is number of cluster value\n :return: Get the cluster centre and the cluster with members\n \"\"\"\n # get random start centre\n k_num = self.choice(int(k))\n # transform format\n k_num_trans = self.trans(k_num)\n\n # get start centre\n centre, labels = self.centre_label(k_num, k_num_trans)\n\n judge = True\n # do iterative stop when the centre are didn't change\n while judge:\n e_centre, e_labels = self.centre_label(k_num, centre)\n print(e_centre)\n if e_centre == centre:\n return e_centre, e_labels\n else:\n centre = e_centre\n\n def run_k_pp(self, k):\n\n \"\"\"\n Start k-means++ method\n :param k: K is number of cluster value\n :return: Get the cluster centre and the cluster with members\n \"\"\"\n # get random start centre\n k_num = self.kpp(k)\n # transform format\n k_num_trans = self.trans(k_num)\n\n # get start centre\n centre, labels = self.centre_label(k_num, k_num_trans)\n\n judge = True\n # do iterative stop when the centre are didn't change\n while 
judge:\r\n e_centre, e_labels = self.centre_label(k_num, centre)\r\n if e_centre == centre:\r\n return e_centre, e_labels\r\n else:\r\n centre = e_centre\r\n\r\n def write_label(self, knn_result, write_path):\r\n \"\"\"\r\n Write result to local disk\r\n :param knn_result: The cluster result by k means or kpp\r\n :param write_path: The path to write\r\n \"\"\"\r\n f = open(write_path, 'w')\r\n for k, v in knn_result.items():\r\n for i in v:\r\n f.write(\"{0}\\t{1}\\t{2}\\n\".format(self.label_dic[i], int(i), k))\r\n f.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n label = ClusterLabel(\"/home/yangfang/PathwayPredict/data/label_matrix.txt\",\r\n \"/home/yangfang/PathwayPredict/data/human_ci_id_adjust_knn.txt\")\r\n label.read_matrix()\r\n label.read_label()\r\n # zz = label.choice(3)\r\n # print(zz)\r\n # print(label.label_dic)\r\n # print(label.l_data)\r\n # y = list(map(int, label.l_data['11602']))\r\n # print(y)\r\n # print(label.nearest_distance(label.trans(zz), [y]))\r\n # xx, yy = label.centre_label(zz, label.trans(zz))\r\n # print(label.trans(zz))\r\n # print(xx)\r\n # print(yy)\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n # c, l = label.run_k_means(3)\r\n # print('++++++++++++++++++++')\r\n # print(c)\r\n # print(l)\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n c, l = label.run_k_pp(3)\r\n print('++++++++++++++++++++')\r\n print(c)\r\n print(l)\r\n label.write_label(l, \"/home/yangfang/PCSF/kmeans/k_pp_3_seed0.txt\")\r\n", "sub_path": "PaPr/k_means_method.py", "file_name": "k_means_method.py", "file_ext": "py", "file_size_in_byte": 8145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "itertools.islice", "line_number": 20, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 42, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 43, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 43, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 72, "usage_type": "call"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 74, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 74, "usage_type": "name"}, {"api_name": "scipy.spatial.distance.cdist", "line_number": 85, "usage_type": "call"}, {"api_name": "scipy.spatial.distance", "line_number": 85, "usage_type": "name"}, {"api_name": "random.seed", "line_number": 107, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 119, "usage_type": "call"}, {"api_name": "random.random", "line_number": 126, "usage_type": "call"}, {"api_name": "random.sample", "line_number": 137, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 154, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 163, "usage_type": "call"}]} +{"seq_id": "362834723", "text": "# -*- coding: utf-8 -*-\r\n# Author: Chen Qifan\r\n# Time: 2018/4/8\r\n# Description: extract the data used to compute feature quantities from the simulation results\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport re\r\nimport codecs\r\nimport xlwt\r\n\r\n\r\nclass OutParamList:\r\n def __init__(self):\r\n # list of load-level percentages\r\n self.flow_pct = ['_100', '_090', '_110']\r\n # read the list of lines (bus pairs)\r\n self.path_bus = r'D:\\Program Files\\BPA\\SAMPLE\\bap-IEEE39'\r\n with codecs.open(self.path_bus + r'\\bus.txt', 'r+', 'utf-8') as self.bus_file:\r\n bus_list_temp = self.bus_file.readlines()\r\n self.bus_list = [bus.split() for bus in bus_list_temp]\r\n # fault type\r\n self.fault_type = '3'\r\n # fault parameter lists\r\n # - fault occurrence time\r\n self.t_occur = 5.0\r\n # - fault clearing time\r\n self.t_disappear_min = 10.0\r\n self.t_disappear_max = 30.0\r\n self.t_disappear_step = 2.5\r\n # - fault location percentage along the line\r\n self.line_percent_min = 10\r\n self.line_percent_max = 90\r\n self.line_percent_step = 20\r\n\r\n # build the fault-related parameter lists used to compose file names and to compute row numbers\r\n def creat_fault_param_list(self):\r\n # list of bus names\r\n self.bus_name = []\r\n for name in self.bus_list:\r\n b1 = [''.join(d for d in name[0] if d.isdigit())]\r\n b2 = [''.join(d for d in name[1] if d.isdigit())]\r\n self.bus_name.append(b1 + b2)\r\n # fault clearing times\r\n self.t_disappear = []\r\n t_temp = self.t_disappear_min\r\n while t_temp <= self.t_disappear_max:\r\n self.t_disappear.append(\"%.1f\" % t_temp)\r\n t_temp += self.t_disappear_step\r\n # fault location percentages\r\n self.line_percent = []\r\n line_temp = self.line_percent_min\r\n while line_temp <= self.line_percent_max:\r\n self.line_percent.append(\"%d\" % line_temp)\r\n line_temp += self.line_percent_step\r\n\r\n # compute the row positions of the data to be extracted from this file\r\n def row_num_calculate(self, t1, t2):\r\n # row_describ = 3 # there are 3 rows between the title row and the data rows\r\n row_t0_neg = 4 # t0- data row\r\n row_t0_pos = int(t1/0.5)+2 # t0+ data row\r\n row_tcL0 = int(t2/0.5+4)+3 # tcL 0+ data row\r\n row_tcL2 = 2\r\n row_tcL4 = 4 # tcL 4+ data row\r\n row_tcL10 = 10 # tcL 10+ data row\r\n row_tcL15 = 15 # tcL 15+ data row\r\n row_tcL20 = 20 # tcL 20+ data row\r\n row_tcL25 = 25 # tcL 25+ data row\r\n row_tcL30 = 30 # tcL 30+ data row\r\n row_tcL40 = 40 # tcL 40+ data row\r\n row_tcL50 = 50 # tcL 50+ data row\r\n row_tcL60 = 60 # tcL 60+ data row\r\n row_tcL80 = 80 # tcL 80+ data row\r\n row_tcL100 = 100 # tcL 100+ data row\r\n row_tcL120 = 120 # tcL 120+ data row\r\n row_tcL140 = 140 # tcL 140+ data row\r\n row_tcL160 = 160 # tcL 160+ data row\r\n row_tcL180 = 180 # tcL 180+ data row\r\n row_tcL200 = 200 # tcL 200+ data row\r\n row_tcL250 = 250 # tcL 250+ data row\r\n row_tcL300 = 300 # tcL 300+ data row\r\n row_tcL350 = 350 # tcL 350+ data row\r\n row_tcL400 = 400 # tcL 400+ data row\r\n row_tcL450 = 450 # tcL 450+ data row\r\n row_t_end = 507 # last-time-step row\r\n self.row_list = [row_t0_neg, row_t0_neg+row_t0_pos, row_tcL0, row_tcL0+row_tcL2, row_tcL0+row_tcL4, row_tcL0+row_tcL10,\\\r\n row_tcL0+row_tcL15, row_tcL0+row_tcL20, row_tcL0+row_tcL25, row_tcL0+row_tcL30, \\\r\n row_tcL0+row_tcL40, row_tcL0+row_tcL50, row_tcL0+row_tcL60, row_tcL0+row_tcL80, \\\r\n row_tcL0+row_tcL100, row_tcL0+row_tcL120, row_tcL0+row_tcL140, row_tcL0+row_tcL160, \\\r\n row_tcL0 + row_tcL180, row_tcL0 + row_tcL200, row_tcL0 + row_tcL250, row_tcL0 + row_tcL300, \\\r\n row_tcL0 + row_tcL350, row_tcL0 + row_tcL400, row_tcL0 + row_tcL450, row_t_end]\r\n\r\n\r\nif __name__ == '__main__':\r\n output_param = OutParamList()\r\n output_param.creat_fault_param_list() # build the fault-related parameter lists used to compose file names and to compute row numbers\r\n output_param.t_disappear += ['11.0', '11.5', '12.0', '13.0', '14.0', '16.0', '17.0', '18.0', '19.0']\r\n # output_param.row_num_calculate() # compute the row positions of the data to be extracted from this file\r\n\r\n # create the Excel file\r\n wbk = xlwt.Workbook(encoding='utf-8')\r\n wbk_sheet = wbk.add_sheet('para_out')\r\n style = xlwt.XFStyle()\r\n row_num = 0\r\n row0 = [u'发电机名', u'时间', u'功角(度)', u'速度偏差(Hz)', u'机械功率(MW)', u'电磁功率(MW)', u'加速功率(MW)', u'无功功率(Mvar)']\r\n for i in range(len(row0)):\r\n wbk_sheet.write(row_num, i, row0[i])\r\n row_num += 1\r\n col_num = 0\r\n # wbk.save(r'D:\\Program Files\\BPA\\SAMPLE\\bap-IEEE39_out\\feature.xls')\r\n\r\n # path of the files to read\r\n path_parent = r'D:\\Program Files\\BPA\\SAMPLE\\bap-IEEE39_out' # file path\r\n\r\n # ----------------------- prepare to filter the data ------------------------ #\r\n file_name_list = [] # list of file names, prefixed with the parent directory\r\n for flow in output_param.flow_pct:\r\n for bus in output_param.bus_name:\r\n for t_d in output_param.t_disappear:\r\n for line_p in output_param.line_percent:\r\n # compose the file name\r\n path_sub = r'\\039bpa' + flow + r'\\039bpa' + flow + '_' + bus[0] + '_' + bus[1] + '_' + output_param.fault_type + '_' + str(\"%.1f\" % output_param.t_occur) + '_' \\\r\n + t_d + '_' + line_p + '.xls'\r\n output_param.row_num_calculate(output_param.t_occur, float(t_d)) # compute the row positions of the data to be extracted from this file\r\n path_read = path_parent + path_sub\r\n # file_name_list.append(file_name)\r\n # print(file_name)\r\n\r\n # for path_sub in file_name_list:\r\n # path_read = path_parent+path_sub\r\n # create the Excel file\r\n wbk = xlwt.Workbook(encoding='utf-8')\r\n wbk_sheet = wbk.add_sheet('para_out')\r\n style = xlwt.XFStyle()\r\n row_num = 0\r\n row0 = [u'发电机名', u'时间', u'功角(度)', u'速度偏差(Hz)', u'机械功率(MW)', u'电磁功率(MW)', \\\r\n u'加速功率(MW)', u'无功功率(Mvar)']\r\n for i in range(len(row0)):\r\n wbk_sheet.write(row_num, i, row0[i])\r\n row_num += 1\r\n col_num = 0\r\n\r\n with codecs.open(path_read, 'r', encoding='gb18030') as data_file:\r\n data = data_file.readlines() # read the data line by line\r\n # print(data[:50])\r\n for line_num in range(len(data)):\r\n # match the generator ('发电机') rows\r\n if re.findall('发电机', data[line_num]):\r\n bus_num = re.findall(r'BUS-\\d+', data[line_num])\r\n # print(bus_num)\r\n for r in output_param.row_list:\r\n line = bus_num + data[line_num + r].split()\r\n # print(line)\r\n for i in range(len(line)):\r\n # store numbers as numeric values\r\n wbk_sheet.write(row_num, i,\r\n (re.findall(r'^-?\\d+.\\d+', line[i]) and [float(line[i])] or [line[i]])[0])\r\n # wbk_sheet.write()\r\n row_num += 1 # next row\r\n wbk.save(r'D:\\Program Files\\BPA\\SAMPLE\\bap-IEEE39_param'+path_sub)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n#pd.set_option('max_colwidth',100) # show 100 characters per line\r\n#data = pd.read_csv(path,encoding='gb18030', header=None, squeeze=True) # read the file with encoding 'gb18030'\r\n#data = codecs.open(path, 'r', encoding='gb18030').readlines()\r\n#data.columns = ['AAA']\r\n#print(data[10:11].str.split().iget_value[0])\r\n#for line in data:\r\n\r\n\r\n#a = pd.Series(['Tom', 'William Rick', 'John', 'Alber@t'])\r\n#print(a.str.len())\r\n\r\n", "sub_path": "src/param_cal.py", "file_name": "param_cal.py", "file_ext": "py", "file_size_in_byte": 8260, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "codecs.open", "line_number": 19, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 101, "usage_type": "call"}, {"api_name": "xlwt.XFStyle", "line_number": 103, "usage_type": "call"}, {"api_name": "xlwt.Workbook", "line_number": 133, "usage_type": "call"}, {"api_name": "xlwt.XFStyle", "line_number": 135, "usage_type": "call"}, {"api_name": "codecs.open", "line_number": 144, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 149, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 150, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 158, "usage_type": "call"}]} +{"seq_id": "482256628", "text": "import re\nimport json\n\nacc = 0\n\ndef walk(node):\n def contains_red(d):\n \"\"\"Return True if d contains a property with the value 'red'\n \"\"\"\n for key, item in d.items():\n if isinstance(item, str):\n if item == \"red\":\n return True\n return False\n\n def search(item):\n if isinstance(item, dict) or isinstance(item, list):\n walk(item)\n # leaf\n else:\n if isinstance(item, int):\n global acc\n acc += item\n\n # handle lists\n if isinstance(node, list):\n for item in node:\n search(item)\n # handle dicts\n else:\n if not contains_red(node):\n for key, item in node.items():\n search(item)\n\ndef main():\n # a way to solve part 1 without json parsing\n text = \"\".join(open(\"../inputs/input12.txt\").readlines())\n res = re.findall(r\"-\\d+|\\d+\", text)\n print(sum([int(n) for n in res]))\n\n # for part 2 we have to parse the json object\n books = 
json.loads(text)\n walk(books)\n print(acc)\n\n #import pprint\n #pprint.PrettyPrinter().pprint(books)\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "2015/Python/solutions/day12.py", "file_name": "day12.py", "file_ext": "py", "file_size_in_byte": 1201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "re.findall", "line_number": 38, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "150862804", "text": "from flask import request, render_template, redirect\nfrom flask_login import login_required, current_user\n\nfrom simplecms import app\nfrom simplecms.models.post import Post\nfrom simplecms.utils.render import ok, error\nfrom simplecms.utils.dump import dump_post_list, dump_post\n\n\n@app.route('/api/posts/new', methods=['POST'])\n@login_required\ndef posts_new():\n json_data = request.get_json()\n user_id = current_user.id\n author_name = json_data.get('author')\n category = json_data.get('category')\n title = json_data.get('title')\n content = json_data.get('content')\n\n if not all((author_name, category, title, content)):\n return error('没有提供所有参数')\n\n post = Post.create_post(\n user_id=user_id,\n author_name=author_name,\n category=category,\n title=title,\n content=content)\n return ok(dump_post(post, mode='only_id'))\n\n\n@app.route('/api/posts/', methods=['GET'])\n@login_required\ndef posts_id(id):\n try:\n post = Post.get(id=id)\n except Post.DoesNotExist:\n return error('post does not exist', 404)\n return ok(dump_post(post))\n\n\n@app.route('/api/posts//update', methods=['POST'])\n@login_required\ndef posts_id_update(id):\n try:\n post = Post.get(id=id)\n except Post.DoesNotExist:\n return error('post does not exist', 404)\n\n json_data = request.get_json()\n\n author_name = json_data.get('author')\n category = json_data.get('category')\n title = json_data.get('title')\n content = json_data.get('content')\n\n if not all((author_name, category, title, content)):\n return error('没有提供所有参数')\n\n post.update_post(\n author_name=author_name,\n category=category,\n title=title,\n content=content)\n return ok()\n\n\n@app.route('/api/posts//delete', methods=['POST'])\n@login_required\ndef posts_id_delete(id):\n try:\n post = Post.get(id=id)\n except Post.DoesNotExist:\n return error('post does not exist', 404)\n\n if post.magazine_posts.count():\n return error('存在关联的杂志引用,确定删除需要先删除关联的杂志引用')\n\n post.delete_instance()\n return ok()\n\n\n@app.route('/api/posts', methods=['GET'])\n@login_required\ndef posts():\n return ok(dump_post_list())\n\n\n@app.route('/posts/', methods=['GET'])\ndef posts_public(id):\n\n try:\n post = Post.get(id=id)\n except Post.DoesNotExist:\n return error('post does not exist', 404)\n\n print(post.category)\n\n if post.category != 'source':\n post.update_visits()\n\n if post.category in ('recipe', 'recipe_list'):\n return redirect(post.content)\n else:\n return render_template('posts.html', post=post)\n", "sub_path": "simplecms/views/post.py", "file_name": "post.py", "file_ext": "py", "file_size_in_byte": 2730, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.request.get_json", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask_login.current_user.id", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask_login.current_user", "line_number": 14, 
"usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 21, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post.create_post", "line_number": 23, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post", "line_number": 23, "usage_type": "name"}, {"api_name": "simplecms.utils.render.ok", "line_number": 29, "usage_type": "call"}, {"api_name": "simplecms.utils.dump.dump_post", "line_number": 29, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 10, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 10, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 11, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.get", "line_number": 36, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post", "line_number": 36, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.DoesNotExist", "line_number": 37, "usage_type": "attribute"}, {"api_name": "simplecms.models.post.Post", "line_number": 37, "usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 38, "usage_type": "call"}, {"api_name": "simplecms.utils.render.ok", "line_number": 39, "usage_type": "call"}, {"api_name": "simplecms.utils.dump.dump_post", "line_number": 39, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 32, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 32, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 33, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.get", "line_number": 46, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post", "line_number": 46, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.DoesNotExist", "line_number": 47, "usage_type": "attribute"}, {"api_name": "simplecms.models.post.Post", "line_number": 47, "usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 48, "usage_type": "call"}, {"api_name": "flask.request.get_json", "line_number": 50, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 50, "usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 58, "usage_type": "call"}, {"api_name": "simplecms.utils.render.ok", "line_number": 65, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 42, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 42, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 43, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.get", "line_number": 72, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post", "line_number": 72, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.DoesNotExist", "line_number": 73, "usage_type": "attribute"}, {"api_name": "simplecms.models.post.Post", "line_number": 73, "usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 74, "usage_type": "call"}, {"api_name": "simplecms.utils.render.error", "line_number": 77, "usage_type": "call"}, {"api_name": "simplecms.utils.render.ok", "line_number": 80, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 68, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 68, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 69, "usage_type": "name"}, {"api_name": "simplecms.utils.render.ok", "line_number": 86, "usage_type": "call"}, {"api_name": 
"simplecms.utils.dump.dump_post_list", "line_number": 86, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 83, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 83, "usage_type": "name"}, {"api_name": "flask_login.login_required", "line_number": 84, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.get", "line_number": 93, "usage_type": "call"}, {"api_name": "simplecms.models.post.Post", "line_number": 93, "usage_type": "name"}, {"api_name": "simplecms.models.post.Post.DoesNotExist", "line_number": 94, "usage_type": "attribute"}, {"api_name": "simplecms.models.post.Post", "line_number": 94, "usage_type": "name"}, {"api_name": "simplecms.utils.render.error", "line_number": 95, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 103, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 105, "usage_type": "call"}, {"api_name": "simplecms.app.route", "line_number": 89, "usage_type": "call"}, {"api_name": "simplecms.app", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "258986383", "text": "from __future__ import print_function, division\n\n__all__ = ['load', 'clone', 'Compound', 'Particle']\n\nimport collections\nfrom collections import OrderedDict, defaultdict\nfrom copy import deepcopy\nimport itertools\nimport os\nimport sys\nfrom warnings import warn\n\nimport mdtraj as md\nimport nglview\nimport numpy as np\nfrom oset import oset as OrderedSet\nimport parmed as pmd\nfrom parmed.periodic_table import AtomicNum, element_by_name, Mass\nfrom six import integer_types, string_types\n\nfrom mbuild.bond_graph import BondGraph\nfrom mbuild.box import Box\nfrom mbuild.exceptions import MBuildError\nfrom mbuild.periodic_kdtree import PeriodicCKDTree\nfrom mbuild.utils.io import run_from_ipython\nfrom mbuild.formats.hoomdxml import write_hoomdxml\nfrom mbuild.formats.lammpsdata import write_lammpsdata\n\n\ndef load(filename, relative_to_module=None, compound=None, coords_only=False,\n **kwargs):\n \"\"\"Load a file into an mbuild compound.\n\n Parameters\n ----------\n filename : str\n relative_to_module :\n compound : mb.Compound, optional\n coords_only : bool, optional\n Only load the coordinates into an existing compoint.\n\n Returns\n -------\n compound : mb.Compound\n\n \"\"\"\n # Handle mbuild *.py files containing a class that wraps a structure file\n # in its own folder. E.g., you build a system from ~/foo.py and it imports\n # from ~/bar/baz.py where baz.py loads ~/bar/baz.pdb.\n if relative_to_module:\n script_path = os.path.realpath(sys.modules[relative_to_module].__file__)\n file_dir = os.path.dirname(script_path)\n filename = os.path.join(file_dir, filename)\n\n if compound is None:\n compound = Compound()\n\n traj = md.load(filename, **kwargs)\n compound.from_trajectory(traj, frame=-1, coords_only=coords_only)\n return compound\n\n\ndef clone(existing_compound, clone_of=None, root_container=None):\n \"\"\"A faster alternative to deepcopying.\n\n Does not resolve circular dependencies. This should be safe provided\n you never try to add the top of a Compound hierarchy to a\n sub-Compound.\n \"\"\"\n if clone_of is None:\n clone_of = dict()\n\n newone = existing_compound._clone(clone_of=clone_of, root_container=root_container)\n existing_compound._clone_bonds(clone_of=clone_of)\n return newone\n\n\nclass Compound(object):\n \"\"\"A building block in the mBuild hierarchy.\n\n Compound is the superclass of all composite building blocks in the mBuild\n hierarchy. 
That is, all composite building blocks must inherit from\n    compound, either directly or indirectly. The design of Compound follows the\n    Composite design pattern (Gamma, Erich; Richard Helm; Ralph Johnson; John\n    M. Vlissides (1995). Design Patterns: Elements of Reusable Object-Oriented\n    Software. Addison-Wesley. p. 395. ISBN 0-201-63361-2.), with Compound being\n    the composite, and Particle playing the role of the primitive (leaf) part,\n    where Particle is in fact simply an alias to the Compound class.\n\n    Compound maintains a list of children (other Compounds contained within), and\n    provides a means to tag the children with labels, so that the compounds can\n    be easily looked up later. Labels may also point to objects outside the\n    Compound's containment hierarchy. Compound has built-in support for copying\n    and deepcopying Compound hierarchies, enumerating particles or bonds in the\n    hierarchy, proximity based searches, visualization, I/O operations, and a\n    number of other convenience methods.\n\n    Parameters\n    ----------\n    subcompounds : Compound, optional, default=None\n        One or more compounds to be added to self.\n    name : str, optional, default=self.__class__.__name__\n        The type of Compound.\n    periodicity : np.ndarray, shape=(3,), dtype=float, optional\n        The periodic lengths of the Compound in the x, y and z directions.\n        Defaults to zeros which is treated as non-periodic.\n\n    Attributes\n    ----------\n    name : str, optional, default=self.__class__.__name__\n        The type of Compound.\n    periodicity : np.ndarray, shape=(3,), dtype=float, optional\n        The periodic lengths of the Compound in the x, y and z directions.\n        Defaults to zeros which is treated as non-periodic.\n    children : OrderedSet\n        Contains all children (other Compounds).\n    labels : OrderedDict\n        Labels to Compound/Atom mappings. These do not necessarily need to be\n        in self.children.\n    parent : mb.Compound\n        The parent Compound that contains this part. Can be None if this\n        compound is the root of the containment hierarchy.\n    referrers : set\n        Other compounds that reference this part with labels.\n\n    \"\"\"\n\n    def __init__(self, subcompounds=None, name=None, pos=None, charge=0.0,\n                 periodicity=None, port_particle=False):\n        super(Compound, self).__init__()\n\n        if name:\n            if not isinstance(name, string_types):\n                raise ValueError('Compound.name should be a string. You passed '\n                                 '{}'.format(name))\n            self.name = name\n        else:\n            self.name = self.__class__.__name__\n\n        # A periodicity of zero in any direction is treated as non-periodic.\n        if periodicity is None:\n            self._periodicity = np.array([0.0, 0.0, 0.0])\n        else:\n            self._periodicity = np.asarray(periodicity)\n\n        if pos is not None:\n            self._pos = np.asarray(pos, dtype=float)\n        else:\n            self._pos = np.zeros(3)\n\n        self.charge = charge\n\n        self.parent = None\n        self.children = OrderedSet()\n        self.labels = OrderedDict()\n        self.referrers = set()\n\n        self.bond_graph = None\n        self.port_particle = port_particle\n\n        # self.add() must be called after labels and children are initialized.\n        if subcompounds:\n            self.add(subcompounds)\n\n    def particles(self, include_ports=False):\n        \"\"\" \"\"\"\n        if not self.children:\n            yield self\n        else:\n            for particle in self._particles(include_ports):\n                yield particle\n\n    def _particles(self, include_ports=False):\n        \"\"\"Return all Particles of the Compound. 
\"\"\"\n for child in self.successors():\n if not child.children:\n if include_ports or not child.port_particle:\n yield child\n\n def successors(self):\n \"\"\"Yield Compounds below self in the hierarchy. \"\"\"\n if not self.children:\n return\n for part in self.children:\n # Parts local to the current Compound.\n yield part\n # Parts further down the hierarchy.\n for subpart in part.successors():\n yield subpart\n\n @property\n def n_particles(self):\n if not self.children:\n return 1\n else:\n return self._n_particles(include_ports=False)\n\n def _n_particles(self, include_ports=False):\n \"\"\"Return the number of Particles in the Compound. \"\"\"\n return sum(1 for _ in self._particles(include_ports))\n\n def _contains_only_ports(self):\n for part in self.children:\n if not part.port_particle:\n return False\n return True\n\n def ancestors(self):\n \"\"\"Generate all ancestors of the Compound recursively. \"\"\"\n if self.parent is not None:\n yield self.parent\n for ancestor in self.parent.ancestors():\n yield ancestor\n\n @property\n def root(self):\n parent = None\n for parent in self.ancestors():\n pass\n if parent is None:\n return self\n return parent\n\n def particles_by_name(self, name):\n for particle in self.particles():\n if particle.name == name:\n yield particle\n\n def add(self, new_child, label=None, containment=True, replace=False,\n inherit_periodicity=True):\n \"\"\"Add a part to the Compound.\n\n Note:\n This does not necessarily add the part to self.children but may\n instead be used to add a reference to the part to self.labels. See\n 'containment' argument.\n\n Parameters\n ----------\n new_child : mb.Compound or list-like of mb.Compound\n The object(s) to be added to this Compound.\n label : str, optional\n A descriptive string for the part.\n containment : bool, optional, default=True\n Add the part to self.children.\n replace : bool, optional, default=True\n Replace the label if it already exists.\n\n \"\"\"\n # Support batch add via lists, tuples and sets.\n if (isinstance(new_child, collections.Iterable) and\n not isinstance(new_child, string_types)):\n for child in new_child:\n self.add(child)\n return\n\n if not isinstance(new_child, Compound):\n raise ValueError('Only objects that inherit from mbuild.Compound '\n 'can be added to Compounds. You tried to add '\n '\"{}\".'.format(new_child))\n\n # Create children and labels on the first add operation\n if self.children is None:\n self.children = OrderedSet()\n if self.labels is None:\n self.labels = OrderedDict()\n\n if containment:\n if new_child.parent is not None:\n raise MBuildError('Part {} already has a parent: {}'.format(\n new_child, new_child.parent))\n self.children.add(new_child)\n new_child.parent = self\n\n if new_child.bond_graph is not None:\n if self.root.bond_graph is None:\n self.root.bond_graph = new_child.bond_graph\n else:\n self.root.bond_graph.compose(new_child.bond_graph)\n\n new_child.bond_graph = None\n\n # Add new_part to labels. 
Does not currently support batch add.\n if label is None:\n label = '{0}[$]'.format(new_child.__class__.__name__)\n\n if label.endswith('[$]'):\n label = label[:-3]\n if label not in self.labels:\n self.labels[label] = []\n label_pattern = label + '[{}]'\n\n count = len(self.labels[label])\n self.labels[label].append(new_child)\n label = label_pattern.format(count)\n\n if not replace and label in self.labels:\n raise MBuildError('Label \"{0}\" already exists in {1}.'.format(\n label, self))\n else:\n self.labels[label] = new_child\n new_child.referrers.add(self)\n\n if (inherit_periodicity and isinstance(new_child, Compound) and\n new_child.periodicity.any()):\n self.periodicity = new_child.periodicity\n\n def remove(self, objs_to_remove):\n \"\"\"Remove children from the Compound. \"\"\"\n if not self.children:\n return\n\n if not hasattr(objs_to_remove, '__iter__'):\n objs_to_remove = [objs_to_remove]\n objs_to_remove = set(objs_to_remove)\n\n if len(objs_to_remove) == 0:\n return\n\n intersection = objs_to_remove.intersection(self.children)\n self.children -= intersection\n objs_to_remove -= intersection\n\n for removed_part in intersection:\n if self.root.bond_graph and self.root.bond_graph.has_node(removed_part):\n self.root.bond_graph.remove_node(removed_part)\n self._remove_references(removed_part)\n\n # Remove the part recursively from sub-compounds.\n if self.children:\n for part in self.children:\n part.remove(objs_to_remove)\n\n @staticmethod\n def _remove_references(removed_part):\n \"\"\"Remove labels pointing to this part and vice versa. \"\"\"\n removed_part.parent = None\n\n # Remove labels in the hierarchy pointing to this part.\n referrers_to_remove = set()\n for referrer in removed_part.referrers:\n if removed_part not in referrer.ancestors():\n for label, referred_part in list(referrer.labels.items()):\n if referred_part is removed_part:\n del referrer.labels[label]\n referrers_to_remove.add(referrer)\n removed_part.referrers -= referrers_to_remove\n\n # Remove labels in this part pointing into the hierarchy.\n labels_to_delete = []\n if isinstance(removed_part, Compound):\n for label, part in removed_part.labels.items():\n if removed_part not in part.ancestors():\n part.referrers.remove(removed_part)\n labels_to_delete.append(label)\n for label in labels_to_delete:\n del removed_part.labels[label]\n\n def referenced_ports(self):\n \"\"\"Return all Ports referenced by this Compound. \"\"\"\n from mbuild.port import Port\n return [port for port in self.labels.values()\n if isinstance(port, Port)]\n\n def available_ports(self):\n \"\"\"Return all unoccupied Ports referenced by this Compound. \"\"\"\n from mbuild.port import Port\n return [port for port in self.labels.values()\n if isinstance(port, Port) and not port.used]\n\n def bonds(self):\n \"\"\"A list of all Bonds in the Compound and sub-Compounds. \"\"\"\n if self.root.bond_graph:\n if self.root == self:\n return self.root.bond_graph.edges_iter()\n else:\n return self.root.bond_graph.subgraph(self.particles()).edges_iter()\n else:\n return iter(())\n\n @property\n def n_bonds(self):\n \"\"\"Return the number of Bonds in the Compound. \"\"\"\n return sum(1 for _ in self.bonds())\n\n def add_bond(self, particle_pair):\n \"\"\"\"\"\"\n if self.root.bond_graph is None:\n self.root.bond_graph = BondGraph()\n\n self.root.bond_graph.add_edge(particle_pair[0], particle_pair[1])\n\n def generate_bonds(self, name_a, name_b, dmin, dmax):\n \"\"\"Add Bonds between all pairs of types a/b within [dmin, dmax]. 
\"\"\"\n particle_kdtree = PeriodicCKDTree(data=self.xyz, bounds=self.periodicity)\n particle_array = np.array(list(self.particles()))\n added_bonds = list()\n for p1 in self.particles_by_name(name_a):\n nearest = self.particles_in_range(p1, dmax, max_particles=20,\n particle_kdtree=particle_kdtree,\n particle_array=particle_array)\n for p2 in nearest:\n if p2 == p1:\n continue\n bond_tuple = (p1, p2) if id(p1) < id(p2) else (p2, p1)\n if bond_tuple in added_bonds:\n continue\n if (p2.name == name_b) and (dmin <= self.min_periodic_distance(p2.pos, p1.pos) <= dmax):\n self.add_bond((p1, p2))\n added_bonds.append(bond_tuple)\n\n def remove_bond(self, particle_pair):\n if self.root.bond_graph is None or not self.root.bond_graph.has_edge(*particle_pair):\n warn(\"Bond between {} and {} doesn't exist!\".format(*particle_pair))\n return\n self.root.bond_graph.remove_edge(*particle_pair)\n\n @property\n def pos(self):\n if not self.children:\n return self._pos\n else:\n return self.center\n\n @pos.setter\n def pos(self, value):\n if not self.children:\n self._pos = value\n else:\n raise MBuildError('Cannot set position on a Compound that has'\n ' children.')\n\n @property\n def periodicity(self):\n return self._periodicity\n\n @periodicity.setter\n def periodicity(self, periods):\n self._periodicity = np.array(periods)\n\n @property\n def xyz(self):\n \"\"\"Return all particle coordinates in this compound.\n\n Returns\n -------\n pos : np.ndarray, shape=(n, 3)\n Array with the positions of all particles.\n \"\"\"\n if not self.children:\n pos = np.expand_dims(self._pos, axis=0)\n else:\n arr = np.fromiter(itertools.chain.from_iterable(\n particle.pos for particle in self.particles()), dtype=float)\n pos = arr.reshape((-1, 3))\n return pos\n\n @property\n def xyz_with_ports(self):\n \"\"\"Return all particle coordinates in this compound including ports. \"\"\"\n if not self.children:\n pos = self._pos\n else:\n arr = np.fromiter(itertools.chain.from_iterable(\n particle.pos for particle in self.particles(include_ports=True)), dtype=float)\n pos = arr.reshape((-1, 3))\n return pos\n\n @property\n def center(self):\n \"\"\"The cartesian center of the Compound based on its Atoms. \"\"\"\n if self.xyz.any():\n return np.mean(self.xyz, axis=0)\n\n @property\n def boundingbox(self):\n \"\"\"Compute the bounding box of the compound. \"\"\"\n xyz = self.xyz\n return Box(mins=xyz.min(axis=0), maxs=xyz.max(axis=0))\n\n def min_periodic_distance(self, xyz0, xyz1):\n \"\"\"Vectorized distance calculation considering minimum image. \"\"\"\n d = np.abs(xyz0 - xyz1)\n d = np.where(d > 0.5 * self.periodicity, self.periodicity - d, d)\n return np.sqrt((d ** 2).sum(axis=-1))\n\n def particles_in_range(self, compound, dmax, max_particles=20, particle_kdtree=None, particle_array=None):\n \"\"\"Find particles within a specified range of another particle. \"\"\"\n if particle_kdtree is None:\n particle_kdtree = PeriodicCKDTree(data=self.xyz, bounds=self.periodicity)\n _, idxs = particle_kdtree.query(compound.pos, k=max_particles, distance_upper_bound=dmax)\n idxs = idxs[idxs != self.n_particles]\n if particle_array is None:\n particle_array = np.array(list(self.particles()))\n return particle_array[idxs]\n\n def view_hierarchy(self, show_ports=False):\n \"\"\"Visualize a compound hierarchy as a tree. \"\"\"\n raise NotImplementedError('Coming soon!')\n\n def visualize(self, show_ports=False):\n \"\"\"Visualize the Compound using nglview. 
\"\"\"\n if run_from_ipython():\n structure = self.to_trajectory(show_ports)\n return nglview.show_mdtraj(structure)\n else:\n try:\n \"\"\"Visualize the Compound using imolecule. \"\"\"\n import imolecule\n json_mol = self._to_json(show_ports)\n return imolecule.draw(json_mol, format='json', shader='lambert',\n drawing_type='ball and stick', camera_type='perspective',\n element_properties=None)\n\n except ImportError:\n raise RuntimeError('Visualization is only supported in Jupyter '\n 'Notebooks.')\n\n def _to_json(self, show_ports=False):\n import imolecule\n atoms = list()\n\n for idx, particle in enumerate(self._particles(include_ports=show_ports)):\n particle.index = idx\n atoms.append({'element': particle.name, 'location': list(np.asarray(particle.pos, dtype=float) * 10)})\n\n bonds = [{'atoms': [atom1.index, atom2.index], 'order': 1} for atom1, atom2 in self.bonds()]\n output = {'name': self.name, 'atoms': atoms, 'bonds': bonds}\n\n # Remove the index attribute on particles.\n for idx, particle in enumerate(self.particles()):\n if not show_ports and particle.port_particle:\n continue\n del particle.index\n\n return imolecule.json_formatter.compress(output)\n\n def update_coordinates(self, filename):\n \"\"\"Update the coordinates of this Compound from a file. \"\"\"\n load(filename, compound=self, coords_only=True)\n\n def save(self, filename, show_ports=False, forcefield=None, **kwargs):\n \"\"\"Save the Compound to a file.\n\n Parameters\n ----------\n filename : str\n Filesystem path in which to save the trajectory. The extension or\n prefix will be parsed and will control the format.\n show_ports : bool, default=False\n Save ports contained within the compound.\n forcefield : str, default=None\n Apply a forcefield to the output file using the `foyer` package.\n\n Other Parameters\n ----------------\n force_overwrite : bool\n\n \"\"\"\n extension = os.path.splitext(filename)[-1]\n\n savers = {'.hoomdxml': self.save_hoomdxml,\n '.gro': self.save_gromacs,\n '.top': self.save_gromacs,\n '.lammps': self.save_lammpsdata,\n '.lmp': self.save_lammpsdata}\n\n try:\n saver = savers[extension]\n except KeyError: # TODO: better reporting\n saver = None\n\n structure = self.to_parmed(show_ports, **kwargs)\n if saver: # mBuild/InterMol supported saver.\n return saver(filename, structure, forcefield, **kwargs)\n else: # ParmEd supported saver.\n return structure.save(filename, **kwargs)\n\n def save_hoomdxml(self, filename, structure, forcefield, box=None, **kwargs):\n \"\"\" \"\"\"\n if forcefield:\n from foyer.forcefield import apply_forcefield\n structure = apply_forcefield(structure, forcefield=forcefield)\n if not box:\n box = self.boundingbox\n for dim, val in enumerate(self.periodicity):\n if val:\n box.lengths[dim] = val\n box.maxs[dim] = val\n box.mins[dim] = 0.0\n if not val:\n box.maxs[dim] += 0.25\n box.mins[dim] -= 0.25\n box.lengths[dim] += 0.5\n write_hoomdxml(structure, filename, forcefield, box, **kwargs)\n\n def save_gromacs(self, filename, structure, forcefield, force_overwrite=False, **kwargs):\n \"\"\" \"\"\"\n\n # Create separate file paths for .gro and .top\n filepath, filename = os.path.split(filename)\n basename = os.path.splitext(filename)[0]\n top_filename = os.path.join(filepath, basename + '.top')\n gro_filename = os.path.join(filepath, basename + '.gro')\n\n if forcefield:\n from foyer.forcefield import apply_forcefield\n structure = apply_forcefield(structure, forcefield=forcefield)\n structure.save(top_filename, 'gromacs', **kwargs)\n 
structure.save(gro_filename, 'gro', **kwargs)\n\n    def save_lammpsdata(self, filename, structure, forcefield, box=None, **kwargs):\n        \"\"\" \"\"\"\n        if forcefield:\n            from foyer.forcefield import apply_forcefield\n            structure = apply_forcefield(structure, forcefield=forcefield)\n        if not box:\n            box = self.boundingbox\n            for dim, val in enumerate(self.periodicity):\n                if val:\n                    box.lengths[dim] = val\n                    box.maxs[dim] = val\n                    box.mins[dim] = 0.0\n                if not val:\n                    box.maxs[dim] += 0.25\n                    box.mins[dim] -= 0.25\n                    box.lengths[dim] += 0.5\n\n        write_lammpsdata(structure, filename, forcefield, box, **kwargs)\n\n    # Interface to Trajectory for reading/writing .pdb and .mol2 files.\n    # -----------------------------------------------------------------\n    def from_trajectory(self, traj, frame=-1, coords_only=False):\n        \"\"\"Extract atoms and bonds from a md.Trajectory.\n\n        Will create sub-compounds for every chain if there is more than one\n        and sub-sub-compounds for every residue.\n\n        Parameters\n        ----------\n        traj : md.Trajectory\n            The trajectory to load.\n        frame : int\n            The frame to take coordinates from.\n\n        \"\"\"\n        if coords_only:\n            if traj.n_atoms != self.n_particles:\n                raise ValueError('Number of atoms in {traj} does not match'\n                                 ' {self}'.format(**locals()))\n            atoms_particles = zip(traj.topology.atoms,\n                                  self._particles(include_ports=False))\n            for mdtraj_atom, particle in atoms_particles:\n                particle.pos = traj.xyz[frame, mdtraj_atom.index]\n            return\n\n        atom_mapping = dict()\n        for chain in traj.topology.chains:\n            if traj.topology.n_chains > 1:\n                chain_compound = Compound()\n                self.add(chain_compound, 'chain[$]')\n            else:\n                chain_compound = self\n            for res in chain.residues:\n                for atom in res.atoms:\n                    new_atom = Particle(name=str(atom.name), pos=traj.xyz[frame, atom.index])\n                    chain_compound.add(new_atom, label='{0}[$]'.format(atom.name))\n                    atom_mapping[atom] = new_atom\n\n        for mdtraj_atom1, mdtraj_atom2 in traj.topology.bonds:\n            atom1 = atom_mapping[mdtraj_atom1]\n            atom2 = atom_mapping[mdtraj_atom2]\n            self.add_bond((atom1, atom2))\n\n        if np.any(traj.unitcell_lengths) and np.any(traj.unitcell_lengths[0]):\n            self.periodicity = traj.unitcell_lengths[0]\n        else:\n            self.periodicity = np.array([0., 0., 0.])\n\n    def to_trajectory(self, show_ports=False, chain_types=None,\n                      residue_types=None, **kwargs):\n        \"\"\"Convert to an md.Trajectory and flatten the compound.\n\n        Parameters\n        ----------\n        show_ports : bool, optional, default=False\n            Include all port atoms when converting to trajectory.\n\n        Returns\n        -------\n        trajectory : md.Trajectory\n\n        See also\n        --------\n        _to_topology\n\n        \"\"\"\n        import mdtraj as md\n\n        atom_list = [particle for particle in self.particles(show_ports)]\n\n        top = self._to_topology(atom_list, chain_types, residue_types)\n\n        # Coordinates.\n        xyz = np.ndarray(shape=(1, top.n_atoms, 3), dtype='float')\n        for idx, atom in enumerate(atom_list):\n            xyz[0, idx] = atom.pos\n\n        # Unitcell information.\n        box = self.boundingbox\n        unitcell_lengths = np.empty(3)\n        for dim, val in enumerate(self.periodicity):\n            if val:\n                unitcell_lengths[dim] = val\n            else:\n                unitcell_lengths[dim] = box.lengths[dim]\n\n        return md.Trajectory(xyz, top, unitcell_lengths=unitcell_lengths,\n                             unitcell_angles=np.array([90, 90, 90]))\n\n    def _to_topology(self, atom_list, chain_types=None, residue_types=None):\n        \"\"\"Create a mdtraj.Topology from a Compound.\n\n        Parameters\n        ----------\n        atom_list :\n        chain_types :\n        residue_types :\n\n        Returns\n        -------\n        top : mdtraj.Topology\n\n        \"\"\"\n        from mdtraj.core.element import 
get_by_symbol\n from mdtraj.core.topology import Topology\n\n if isinstance(chain_types, Compound):\n chain_types = [Compound]\n if isinstance(chain_types, (list, set)):\n chain_types = tuple(chain_types)\n\n if isinstance(residue_types, Compound):\n residue_types = [Compound]\n if isinstance(residue_types, (list, set)):\n residue_types = tuple(residue_types)\n top = Topology()\n atom_mapping = {}\n\n default_chain = top.add_chain()\n default_residue = top.add_residue('RES', default_chain)\n\n last_residue_compound = None\n last_chain_compound = None\n last_residue = None\n last_chain = None\n\n for atom in atom_list:\n # Chains\n for parent in atom.ancestors():\n if chain_types and isinstance(parent, chain_types):\n if parent != last_chain_compound:\n last_chain_compound = parent\n last_chain = top.add_chain()\n last_chain_default_residue = top.add_residue('RES', last_chain)\n last_chain.compound = last_chain_compound\n break\n else:\n last_chain = default_chain\n last_chain.compound = last_chain_compound\n\n # Residues\n for parent in atom.ancestors():\n if residue_types and isinstance(parent, residue_types):\n if parent != last_residue_compound:\n last_residue_compound = parent\n last_residue = top.add_residue(parent.__class__.__name__, last_chain)\n last_residue.compound = last_residue_compound\n break\n else:\n if last_chain != default_chain:\n last_residue = last_chain_default_residue\n else:\n last_residue = default_residue\n last_residue.compound = last_residue_compound\n\n # Add the actual atoms\n try:\n elem = get_by_symbol(atom.name)\n except KeyError:\n elem = get_by_symbol(\"VS\")\n at = top.add_atom(atom.name, elem, last_residue)\n at.charge = atom.charge\n atom_mapping[atom] = at\n\n # Remove empty default residues.\n chains_to_remove = [chain for chain in top.chains if chain.n_atoms == 0]\n residues_to_remove = [res for res in top.residues if res.n_atoms == 0]\n for chain in chains_to_remove:\n top._chains.remove(chain)\n for res in residues_to_remove:\n for chain in top.chains:\n try:\n chain._residues.remove(res)\n except ValueError: # Already gone.\n pass\n\n for atom1, atom2 in self.bonds():\n # Ensure that both atoms are part of the compound. 
This becomes an\n # issue if you try to convert a sub-compound to a topology which is\n # bonded to a different subcompound.\n if all(a in atom_mapping.keys() for a in [atom1, atom2]):\n top.add_bond(atom_mapping[atom1], atom_mapping[atom2])\n return top\n\n def from_parmed(self, structure, coords_only=False):\n \"\"\"Extract atoms and bonds from a pmd.Structure.\n\n Will create sub-compounds for every chain if there is more than one\n and sub-sub-compounds for every residue.\n\n Parameters\n ----------\n structure : pmd.Structure\n The structure to load.\n coords_only : bool\n Set preexisting atoms in compound to coordinates given by structure.\n\n \"\"\"\n if coords_only:\n if len(structure.atoms) != self.n_particles:\n raise ValueError('Number of atoms in {structure} does not match'\n ' {self}'.format(**locals()))\n atoms_particles = zip(structure.atoms,\n self._particles(include_ports=False))\n for parmed_atom, particle in atoms_particles:\n particle.pos = structure.coordinates[parmed_atom.idx]\n return\n\n atom_mapping = dict()\n chain_id = None\n chains = defaultdict(list)\n for residue in structure.residues:\n chains[residue.chain].append(residue)\n\n for chain, residues in chains.items():\n if len(chains) > 1:\n chain_compound = Compound()\n self.add(chain_compound, chain_id)\n else:\n chain_compound = self\n for residue in residues:\n for atom in residue.atoms:\n new_atom = Particle(name=str(atom.name), pos=structure.coordinates[atom.idx])\n chain_compound.add(new_atom, label='{0}[$]'.format(atom.name))\n atom_mapping[atom] = new_atom\n\n for bond in structure.bonds:\n atom1 = atom_mapping[bond.atom1]\n atom2 = atom_mapping[bond.atom2]\n self.add_bond((atom1, atom2))\n\n if structure.box is not None:\n self.periodicity = structure.box[0:3]\n else:\n self.periodicity = np.array([0., 0., 0.])\n\n def to_parmed(self, title='', **kwargs):\n \"\"\"Create a ParmEd Structure from a Compound. 
\"\"\"\n structure = pmd.Structure()\n structure.title = title if title else self.name\n atom_mapping = {} # For creating bonds below\n for atom in self.particles():\n atomic_number = None\n try:\n atomic_number = AtomicNum[atom.name]\n except KeyError:\n element = element_by_name(atom.name)\n warn('Guessing that {} is element: {}'.format(atom, element))\n else:\n element = atom.name\n\n atomic_number = atomic_number or AtomicNum[element]\n mass = Mass[element]\n pmd_atom = pmd.Atom(atomic_number=atomic_number, name=atom.name,\n mass=mass)\n pmd_atom.xx, pmd_atom.xy, pmd_atom.xz = atom.pos * 10 # Angstroms\n structure.add_atom(pmd_atom, resname='RES', resnum=1)\n atom_mapping[atom] = pmd_atom\n\n for atom1, atom2 in self.bonds():\n bond = pmd.Bond(atom_mapping[atom1], atom_mapping[atom2])\n structure.bonds.append(bond)\n\n box = self.boundingbox\n box_vector = np.empty(6)\n box_vector[3] = box_vector[4] = box_vector[5] = 90.0\n for dim, val in enumerate(self.periodicity):\n if val:\n box_vector[dim] = val * 10\n else:\n box_vector[dim] = box.lengths[dim] * 10 + 5\n structure.box = box_vector\n return structure\n\n def to_intermol(self, molecule_types=None):\n \"\"\"Create an InterMol system from a Compound.\n\n Parameters\n ----------\n molecule_types : list or tuple of subclasses of Compound\n\n Returns\n -------\n intermol_system : intermol.system.System\n\n \"\"\"\n from intermol.atom import Atom as InterMolAtom\n from intermol.molecule import Molecule\n from intermol.system import System\n import simtk.unit as u\n\n if isinstance(molecule_types, list):\n molecule_types = tuple(molecule_types)\n elif molecule_types is None:\n molecule_types = (type(self),)\n intermol_system = System()\n\n last_molecule_compound = None\n for atom_index, atom in enumerate(self.particles()):\n for parent in atom.ancestors():\n # Don't want inheritance via isinstance().\n if type(parent) in molecule_types:\n # Check if we have encountered this molecule type before.\n if parent.name not in intermol_system.molecule_types:\n self._add_intermol_molecule_type(intermol_system, parent)\n if parent != last_molecule_compound:\n last_molecule_compound = parent\n last_molecule = Molecule(name=parent.name)\n intermol_system.add_molecule(last_molecule)\n break\n else:\n # Should never happen if molecule_types only contains type(self)\n raise ValueError('Found an atom {} that is not part of any of '\n 'the specified molecule types {}'.format(\n atom, molecule_types))\n\n # Add the actual intermol atoms.\n intermol_atom = InterMolAtom(atom_index + 1, name=atom.name,\n residue_index=1, residue_name='RES')\n intermol_atom.position = atom.pos * u.nanometers\n last_molecule.add_atom(intermol_atom)\n return intermol_system\n\n @staticmethod\n def _add_intermol_molecule_type(intermol_system, parent):\n \"\"\"Create a molecule type for the parent and add bonds. 
\"\"\"\n from intermol.moleculetype import MoleculeType\n from intermol.forces.bond import Bond as InterMolBond\n\n molecule_type = MoleculeType(name=parent.name)\n intermol_system.add_molecule_type(molecule_type)\n\n for index, parent_atom in enumerate(parent.particles()):\n parent_atom.index = index + 1\n\n for atom1, atom2 in parent.bonds():\n intermol_bond = InterMolBond(atom1.index, atom2.index)\n molecule_type.bonds.add(intermol_bond)\n\n def __getitem__(self, selection):\n if isinstance(selection, integer_types):\n return list(self.particles())[selection]\n if isinstance(selection, string_types):\n return self.labels.get(selection)\n\n def __repr__(self):\n descr = list('<')\n descr.append(self.name + ' ')\n\n if self.children:\n descr.append('{:d} particles, '.format(self.n_particles))\n if any(self.periodicity):\n descr.append('periodicity: {}, '.format(self.periodicity))\n else:\n descr.append('non-periodic, ')\n else:\n descr.append('pos=({: .4f},{: .4f},{: .4f}), '.format(self.pos[0], self.pos[1], self.pos[2]))\n\n descr.append('{:d} bonds, '.format(self.n_bonds))\n\n descr.append('id: {}>'.format(id(self)))\n return ''.join(descr)\n\n def _clone(self, clone_of=None, root_container=None):\n \"\"\"A faster alternative to deepcopying.\n\n Does not resolve circular dependencies. This should be safe provided\n you never try to add the top of a Compound hierarchy to a\n sub-Compound. Clones compound hierarchy only, not the bonds.\n \"\"\"\n if root_container is None:\n root_container = self\n if clone_of is None:\n clone_of = dict()\n\n # If this compound has already been cloned, return that.\n if self in clone_of:\n return clone_of[self]\n\n # Otherwise we make a new clone.\n cls = self.__class__\n newone = cls.__new__(cls)\n\n # Remember that we're cloning the new one of self.\n clone_of[self] = newone\n\n newone.name = deepcopy(self.name)\n newone.periodicity = deepcopy(self.periodicity)\n newone._pos = deepcopy(self._pos)\n newone.charge = deepcopy(self.charge)\n newone.port_particle = deepcopy(self.port_particle)\n if hasattr(self, 'index'):\n newone.index = deepcopy(self.index)\n\n if self.children is None:\n newone.children = None\n else:\n newone.children = OrderedSet()\n # Parent should be None initially.\n newone.parent = None\n newone.labels = OrderedDict()\n newone.referrers = set()\n newone.bond_graph = None\n\n # Add children to clone.\n if self.children:\n for child in self.children:\n newchild = child._clone(clone_of, root_container)\n newone.children.add(newchild)\n newchild.parent = newone\n\n # Copy labels, except bonds with atoms outside the hierarchy.\n if self.labels:\n for label, compound in self.labels.items():\n if not isinstance(compound, list):\n newone.labels[label] = compound._clone(clone_of, root_container)\n compound.referrers.add(clone_of[compound])\n else:\n # compound is a list of compounds, so we create an empty\n # list, and add the clones of the original list elements.\n newone.labels[label] = []\n for subpart in compound:\n newone.labels[label].append(subpart._clone(clone_of, root_container))\n # Referrers must have been handled already, or the will be handled\n\n return newone\n\n def _clone_bonds(self, clone_of=None):\n newone = clone_of[self]\n for c1, c2 in self.bonds():\n newone.add_bond((clone_of[c1], clone_of[c2]))\n\n\nParticle = Compound\n", "sub_path": "mbuild/compound.py", "file_name": "compound.py", "file_ext": "py", "file_size_in_byte": 40276, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", 
"pt": "62", "api": [{"api_name": "os.path.realpath", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "sys.modules", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "mdtraj.load", "line_number": 58, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 133, "usage_type": "argument"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 144, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 147, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 149, "usage_type": "call"}, {"api_name": "oset.oset", "line_number": 154, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 155, "usage_type": "call"}, {"api_name": "collections.Iterable", "line_number": 251, "usage_type": "attribute"}, {"api_name": "six.string_types", "line_number": 252, "usage_type": "argument"}, {"api_name": "oset.oset", "line_number": 264, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 266, "usage_type": "call"}, {"api_name": "mbuild.exceptions.MBuildError", "line_number": 270, "usage_type": "call"}, {"api_name": "mbuild.exceptions.MBuildError", "line_number": 298, "usage_type": "call"}, {"api_name": "mbuild.port.Port", "line_number": 363, "usage_type": "name"}, {"api_name": "mbuild.port.Port", "line_number": 369, "usage_type": "name"}, {"api_name": "mbuild.bond_graph.BondGraph", "line_number": 389, "usage_type": "call"}, {"api_name": "mbuild.periodic_kdtree.PeriodicCKDTree", "line_number": 395, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 396, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 414, "usage_type": "call"}, {"api_name": "mbuild.exceptions.MBuildError", "line_number": 430, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 439, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.fromiter", "line_number": 453, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 453, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 453, "usage_type": "attribute"}, {"api_name": "numpy.fromiter", "line_number": 464, "usage_type": "call"}, {"api_name": "itertools.chain.from_iterable", "line_number": 464, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 464, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 473, "usage_type": "call"}, {"api_name": "mbuild.box.Box", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 483, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 484, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 485, "usage_type": "call"}, {"api_name": "mbuild.periodic_kdtree.PeriodicCKDTree", "line_number": 490, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 494, "usage_type": "call"}, {"api_name": "mbuild.utils.io.run_from_ipython", "line_number": 503, "usage_type": "call"}, {"api_name": "nglview.show_mdtraj", "line_number": 505, "usage_type": "call"}, {"api_name": "imolecule.draw", 
"line_number": 511, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 525, "usage_type": "call"}, {"api_name": "imolecule.json_formatter.compress", "line_number": 536, "usage_type": "call"}, {"api_name": "imolecule.json_formatter", "line_number": 536, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 560, "usage_type": "call"}, {"api_name": "os.path", "line_number": 560, "usage_type": "attribute"}, {"api_name": "foyer.forcefield.apply_forcefield", "line_number": 583, "usage_type": "call"}, {"api_name": "mbuild.formats.hoomdxml.write_hoomdxml", "line_number": 595, "usage_type": "call"}, {"api_name": "os.path.split", "line_number": 601, "usage_type": "call"}, {"api_name": "os.path", "line_number": 601, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 602, "usage_type": "call"}, {"api_name": "os.path", "line_number": 602, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 603, "usage_type": "call"}, {"api_name": "os.path", "line_number": 603, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 604, "usage_type": "call"}, {"api_name": "os.path", "line_number": 604, "usage_type": "attribute"}, {"api_name": "foyer.forcefield.apply_forcefield", "line_number": 608, "usage_type": "call"}, {"api_name": "foyer.forcefield.apply_forcefield", "line_number": 616, "usage_type": "call"}, {"api_name": "mbuild.formats.lammpsdata.write_lammpsdata", "line_number": 629, "usage_type": "call"}, {"api_name": "{'Port': 'mbuild.port.Port', 'imolecule': 'imolecule', 'apply_forcefield': 'foyer.forcefield.apply_forcefield'}", "line_number": 660, "usage_type": "call"}, {"api_name": "numpy.any", "line_number": 675, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 678, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 705, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 711, "usage_type": "call"}, {"api_name": "mdtraj.Trajectory", "line_number": 718, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 719, "usage_type": "call"}, {"api_name": "mdtraj.core.topology.Topology", "line_number": 747, "usage_type": "call"}, {"api_name": "mdtraj.core.element.get_by_symbol", "line_number": 789, "usage_type": "call"}, {"api_name": "mdtraj.core.element.get_by_symbol", "line_number": 791, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 842, "usage_type": "call"}, {"api_name": "{'Port': 'mbuild.port.Port', 'imolecule': 'imolecule', 'apply_forcefield': 'foyer.forcefield.apply_forcefield', 'md': 'mdtraj', 'get_by_symbol': 'mdtraj.core.element.get_by_symbol', 'Topology': 'mdtraj.core.topology.Topology'}", "line_number": 848, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 866, "usage_type": "call"}, {"api_name": "parmed.Structure", "line_number": 870, "usage_type": "call"}, {"api_name": "parmed.periodic_table.AtomicNum", "line_number": 876, "usage_type": "name"}, {"api_name": "parmed.periodic_table.element_by_name", "line_number": 878, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 879, "usage_type": "call"}, {"api_name": "parmed.periodic_table.AtomicNum", "line_number": 883, "usage_type": "name"}, {"api_name": "parmed.periodic_table.Mass", "line_number": 884, "usage_type": "name"}, {"api_name": "parmed.Atom", "line_number": 885, "usage_type": "call"}, {"api_name": "parmed.Bond", "line_number": 892, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 896, "usage_type": "call"}, 
{"api_name": "intermol.system.System", "line_number": 927, "usage_type": "call"}, {"api_name": "intermol.molecule.Molecule", "line_number": 939, "usage_type": "call"}, {"api_name": "intermol.atom.Atom", "line_number": 949, "usage_type": "call"}, {"api_name": "simtk.unit.nanometers", "line_number": 951, "usage_type": "attribute"}, {"api_name": "simtk.unit", "line_number": 951, "usage_type": "name"}, {"api_name": "intermol.moleculetype.MoleculeType", "line_number": 961, "usage_type": "call"}, {"api_name": "intermol.forces.bond.Bond", "line_number": 968, "usage_type": "call"}, {"api_name": "six.integer_types", "line_number": 972, "usage_type": "argument"}, {"api_name": "six.string_types", "line_number": 974, "usage_type": "argument"}, {"api_name": "copy.deepcopy", "line_number": 1018, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1019, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1020, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1021, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1022, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 1024, "usage_type": "call"}, {"api_name": "oset.oset", "line_number": 1029, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 1032, "usage_type": "call"}]} +{"seq_id": "287693640", "text": "import urllib.request\nfrom bs4 import BeautifulSoup\nfrom pandas import DataFrame\n\nmax_page=100\nresult=[]\n# index=\"index\"\n\nfor page_idx in range(1,max_page+1):\n #페이지에 파라미터가 들어간다. get 방식 post방식 파라미터 물고 들어가는게 get 방식\n Cheogajip_URL='http://www.cheogajip.co.kr/establish02_02.html?page=%s&search=&keyword='%str(page_idx)\n print(Cheogajip_URL)\n\n response = urllib.request.urlopen(Cheogajip_URL)\n soupData = BeautifulSoup(response.read().decode('CP949'),'html.parser')\n\n store_trs = soupData.find_all('tr',attrs={'align':'center','bgcolor':'#FFFFFF'})\n print(\"End\")\n # if (store_trs):#차이 없음\n if store_trs:\n for store_tr in store_trs:\n tr_tag = list(store_tr.strings)\n\n # if(tr_tag[1].count('[휴점]')==0):#체인점 명이 tr_tag[1]에 있음.\n if not (tr_tag[1].count('[휴점]')):\n store_name = tr_tag[1]\n store_address = tr_tag[3]\n phone_number = tr_tag[5]\n result.append([store_name]+[store_address]+[phone_number])\n #result.append(store_name+','+store_address+','+phone_number)\ncheogajip_table = DataFrame(result, columns=('store_name','store_address','phone_number'))\ncheogajip_table.to_csv(\"cheogajip.csv\",encoding=\"cp949\",mode='w',index=True)\n\nprint(\"end\")", "sub_path": "samples/Parsing_Chicken.py", "file_name": "Parsing_Chicken.py", "file_ext": "py", "file_size_in_byte": 1368, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "urllib.request.request.urlopen", "line_number": 14, "usage_type": "call"}, {"api_name": "urllib.request.request", "line_number": 14, "usage_type": "attribute"}, {"api_name": "urllib.request", "line_number": 14, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 15, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 31, "usage_type": "call"}]} +{"seq_id": "276064334", "text": "import redis\nimport time\nimport yaml\nfrom collections import deque\n\nclass Config:\n def __init__(self, fn_config:str):\n self.config_flist = self.init_flist()\n self.config_optlist = self.init_optlist()\n self.config_slist = self.init_slist(fn_config)\n print(self.config_flist)\n print(self.config_optlist)\n 
print(self.config_slist)\n\n    def init_flist(self):\n        conn = redis.Redis(host=\"168.36.1.115\", port=6380, password=\"\", charset='gb18030', errors='replace',\n                           decode_responses=True)\n        pre = \"qdb:securityex:derivatives:\"\n        code = \"CODE\"\n        t = ['IC', 'IF', 'IH']\n        d = dict()\n        for i in t:\n            for j in range(1, 5):\n                d[i + \"0\" + str(j)] = conn.get(pre + i + \"0\" + str(j) + \":CODE\")\n        return d\n\n    def init_optlist(self):\n        conn = redis.Redis(host=\"168.36.1.170\", port=6379, password=\"\", charset='gb18030', errors='replace',\n                           decode_responses=True)\n        keys = conn.keys()\n\n        re = dict()\n        num = 0\n        for key in keys:\n            if key.startswith(\"OPLST:01\"):\n                num = num + 1\n                # print(key)\n                code = conn.hget(key, 'InstrumentCode')\n                re_key = code[7:]\n                if not re_key in re.keys():\n                    re[re_key] = ['', '']\n                if code[6] == 'P':\n                    re[re_key][1] = conn.hget(key, 'InstrumentID')\n                if code[6] == 'C':\n                    re[re_key][0] = conn.hget(key, 'InstrumentID')\n        # print(num)\n        return re\n\n    def init_slist(self,filename: str):\n        with open(filename, 'r') as file:\n            cont = file.read()\n        res = yaml.load(cont, Loader=yaml.FullLoader)\n        return res['config']['slist']\n\nclass UpperLower:\n    def __init__(self, interval_name:str, upp_pro:float, low_pro:float):\n        self.interval_dict = {\"A30m\":1800,'A10s':10,'A5m':300}\n        self.interval_name = interval_name\n        self.interval = self.interval_dict[self.interval_name]\n        self.data_queue = deque(maxlen=self.interval)\n        self.upper = None\n        self.low = None\n        self.upp_pro = upp_pro\n        self.low_pro = low_pro\n\n    def appendData(self,data:int):\n        if data == None:\n            self.data_queue = deque(maxlen=self.interval)\n            self.upper = None\n            self.low = None\n        else:\n            self.data_queue.append(float(data))\n\n    def computeUpLow(self):\n        if len(self.data_queue) == self.interval:\n            print(\"hhh\")\n            print(self.data_queue)\n            t_l = list(self.data_queue)\n            t_l = sorted(t_l)\n            self.upper = t_l[int(len(t_l) * self.upp_pro)]\n            self.low = t_l[int(len(t_l) * self.low_pro)]\n        elif len(self.data_queue) > 0:\n            t_l = list(self.data_queue)\n            t_l = sorted(t_l)\n            self.upper = t_l[int(len(t_l)* self.upp_pro) ]\n            self.low = t_l[int(len(t_l)* self.low_pro) ]\n            print(\"Data accumulation incomplete; showing current values\")\n        else:\n            print(\"No data in the deque\")\n\n    def ifUppLow(self):\n        if self.upper == None or self.low == None:\n            return False\n        else:\n            return True\n\n    def getUppLow(self):\n        return self.upper, self.low\n\n\nclass DateType:\n    def __init__(self, keyType:str, keyname:str):\n        self.keyType = keyType #> :A5:....\n        self.keyname = keyname #> LATEST, SP1, BP1\n        self.data = None\n\n\nclass Flux:\n    def __init__(self, conn_r):\n        self.p_r = conn_r.pipeline(transaction=False)\n\n    def getBatchData(self,dt_list:list,cur_ts:int):\n        prefix = \"MDLD:\" + str(cur_ts)\n        for dt in dt_list:\n            # print(prefix+dt.keyType)\n            self.p_r.hmget(prefix+dt.keyType,dt.keyname)\n        return self.p_r.execute()\n\n    def writeBatchData(self,dt_list:list,cur_ts:int):\n        prefix = \"MDLD:\" + str(cur_ts)\n        for dt in dt_list:\n            self.p_r.hmset(prefix + dt.keyType, dt.data)\n        self.p_r.execute()\n\n    def getData(self, cur_ts:int,dt:DateType):\n        prefix = \"MDLD:\" + str(cur_ts)\n        self.dt = dt\n        self.p_r.hget(prefix + self.dt.keyType,self.dt.keyname)\n        self.dt.data = self.p_r.execute()[0]\n        print(self.dt.data)\n        if self.dt.data == None:\n            print(\"Flux query returned no data\")\n        return dt.data\n\n    def writeData(self,cur_ts:int,data:dict,dt:DateType):\n        prefix = \"MDLD:\" + str(cur_ts)\n        self.p_r.hmset(prefix + self.dt.keyType,data)\n        self.p_r.execute()\n\n    def sendWriteOrder(self,cur_ts:int, dt:DateType,data:dict):\n        prefix 
=\"MDLD:\" + str(cur_ts)\n self.p_r.hmset(prefix + dt.keyType,data)\n\n def fluxExecute(self):\n self.p_r.execute()\n\nclass UpperLowControl:\n def __init__(self, flux:Flux, work_period, interval,upp_pro:float, low_pro:float):\n self.flux = flux\n self.upplow = UpperLower(interval,upp_pro,low_pro)\n self.work_period = work_period\n self.interval = interval\n self.currentDate = str(time.localtime().tm_year) + \"-\" + str(time.localtime().tm_mon) + \"-\" + str(time.localtime().tm_mday) #> \"2019-07-11\"\n self.config = Config(\"redis_mdld.yaml\")\n date_list = set()\n for pxname, (icode_c, icode_p) in self.config.config_optlist.items():\n date_list.add(pxname[:4]) #> “1909”\n date_list = list(date_list)\n date_list.sort() #> ['1908', '1909', '1912', '2003']\n self.oplist = list()\n\n for pxname, (icode_c, icode_p) in self.config.config_optlist.items():\n if pxname.startswith(date_list[0]):\n self.oplist.append(pxname) #> \"02750\")\n self.oplist = sorted(self.oplist,key = lambda x: int(x[-5:] * 10))\n print(self.oplist)\n\n def start(self):\n print(\"上班啦\")\n print(\"上午:\")\n self.operate(self.work_period[0])\n print(\"下午:\")\n self.operate(self.work_period[1])\n print(\"下班了\")\n\n def operate(self,interval):\n print(self.currentDate + \" \" + interval[0])\n start_time = time.mktime(time.strptime(self.currentDate + \" \" + interval[0] , \"%Y-%m-%d %H:%M:%S\"))\n end_time = time.mktime(time.strptime(self.currentDate + \" \" + interval[1], \"%Y-%m-%d %H:%M:%S\"))\n if time.time() > end_time:\n return\n ctime = time.time()\n if ctime < start_time:\n print(\"等待开盘\")\n interval = int(start_time) - time.time()\n if interval > 0:\n time.sleep(interval)\n self.run()\n\n ctime = time.time()\n if ctime >= start_time and ctime <= end_time:\n time.sleep(int(time.time()) + 1 - time.time())\n while time.time() < end_time:\n self.run()\n time1 = time.time()\n time.sleep(int(time.time()) + 1 - time.time())\n if int(time.time()) - int(time1) != 1:\n print(\"miss one\")\n self.run()\n\n def run(self):\n cur_ts = int(time.time())-int(time.mktime(time.strptime(self.currentDate + \" \" + \"00:00:00\" , \"%Y-%m-%d %H:%M:%S\"))) + 3600 - 1 # > 1563785675\n print(\"当前时间:\" + str(cur_ts))\n dt = self.getPingJiaContract(cur_ts)\n # print(\"run\",data)\n self.upplow.appendData(dt.data)\n self.upplow.computeUpLow()\n if self.upplow.ifUppLow():\n d = dict()\n d[self.upplow.interval_name + \"Upper\"], d[self.upplow.interval_name + \"Low\"] = self.upplow.getUppLow()\n print(d)\n dt.keyType = \":A5:PJ\" #> 平价\n self.flux.writeData(cur_ts,d,dt)\n else:\n print(\"上限下限未计算\")\n\n def getPe(self,cur_ts:int):\n pe = self.flux.getData(cur_ts,DateType(\":A5:\" + self.oplist[0],\"Pe\"))\n # print(pe)\n return pe\n\n def getPingJiaContract(self,cur_ts:int):\n i = 0\n pe = self.getPe(cur_ts)\n if pe == None:\n return DateType(\"\",\"LATEST\")\n else:\n pe = float(pe)\n while i < len(self.oplist):\n if pe < (int(self.oplist[i][-5:]) * 10):\n break\n else:\n i = i + 1\n if abs(pe - int(self.oplist[i-1][-5:] * 10)) >= abs(pe - int(self.oplist[i][-5:] * 10)):\n dt = DateType(\":A5:\" + self.oplist[i],'LATEST')\n data = self.flux.getData(cur_ts, dt)\n print(\"当前平价合约为\", self.oplist[i])\n return dt\n else:\n dt = DateType(\":A5:\" + self.oplist[i - 1], 'LATEST')\n data= self.flux.getData(cur_ts, dt)\n print(\"当前平价合约为\", self.oplist[i-1])\n return dt\n\n\n\n\n\n\n\n\ndef main():\n keyType = \":A5:1908M02800\"\n conn_r = redis.Redis(host=\"168.36.1.116\", port = 6379, password=\"\", charset='gb18030',errors=\"replace\",\n 
decode_responses=True)\n\n f = Flux(conn_r)\n # ul = UpperLowControl(f,[[\"08:30:00\",\"11:30:00\"],[\"16:22:00\",\"17:31:00\"]],10,0.9,0.1)\n # ul.start()\n\n ul = UpperLowControl(f,[[\"09:30:00\",\"11:30:01\"],[\"13:00:00\",\"15:00:01\"]],'A30m',0.9,0.1)\n ul.start()\n\n\nif __name__ == '__main__':\n main()\n\n", "sub_path": "src/RealTimeData/UpperLower2.py", "file_name": "UpperLower2.py", "file_ext": "py", "file_size_in_byte": 9306, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "redis.Redis", "line_number": 16, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 28, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 52, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 52, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 60, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 68, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 153, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 178, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 178, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 179, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 179, "usage_type": "call"}, {"api_name": "time.time", "line_number": 180, "usage_type": "call"}, {"api_name": "time.time", "line_number": 182, "usage_type": "call"}, {"api_name": "time.time", "line_number": 185, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 187, "usage_type": "call"}, {"api_name": "time.time", "line_number": 190, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 192, "usage_type": "call"}, {"api_name": "time.time", "line_number": 193, "usage_type": "call"}, {"api_name": "time.time", "line_number": 195, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 196, "usage_type": "call"}, {"api_name": "time.time", "line_number": 196, "usage_type": "call"}, {"api_name": "time.time", "line_number": 197, "usage_type": "call"}, {"api_name": "time.time", "line_number": 202, "usage_type": "call"}, {"api_name": "time.mktime", "line_number": 202, "usage_type": "call"}, {"api_name": "time.strptime", "line_number": 202, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 254, "usage_type": "call"}]} +{"seq_id": "546941985", "text": "#!/usr/bin/env python\n\"\"\"\nThis script will delete a bag from the storage service. 
Start by running:\n\n $ python3 ss_delete_bag.py --help\n\nand follow the instructions from there.\n\"\"\"\n\nimport collections\nimport datetime\nimport json\nimport os\nimport sys\nimport textwrap\n\nimport boto3\nimport click\nfrom elasticsearch.exceptions import NotFoundError as ElasticNotFoundError\nimport humanize\nfrom wellcome_storage_service import IngestNotFound\n\nfrom helpers import azure, dynamo, s3\nfrom helpers.iam import (\n ACCOUNT_ID,\n ADMIN_ROLE_ARN,\n DEV_ROLE_ARN,\n READ_ONLY_ROLE_ARN,\n create_aws_client_from_credentials,\n create_aws_client_from_role_arn,\n create_dynamo_client_from_role_arn,\n get_underlying_role_arn,\n temporary_iam_credentials,\n)\nfrom helpers.reporting import get_reporting_client\nfrom helpers.s3 import delete_s3_prefix\nfrom helpers.storage_service import lookup_ingest\n\n\n@click.command()\n@click.argument(\"ingest_id\", required=True)\n@click.option(\"--skip-azure-login\", is_flag=True, default=False)\ndef main(ingest_id, skip_azure_login):\n \"\"\"\n Delete an ingest and the corresponding bag from the storage service.\n\n There is no \"undo\" button. Please use with caution!\n\n I recommend testing with a test bag in the staging service before running\n this against prod, to check the script still works -- we don't use it very\n often, and it's possible something will have broken since the last time it\n was used.\n \"\"\"\n try:\n api_name, storage_client, ingest_data = lookup_ingest(ingest_id)\n except IngestNotFound:\n abort(f\"Could not find {ingest_id} in either API!\")\n\n space = ingest_data[\"space\"]\n external_identifier = ingest_data[\"external_identifier\"]\n version = ingest_data[\"version\"]\n date_created = ingest_data[\"date_created\"]\n\n # Prompt the user to confirm that yes, they really want to delete this bag.\n #\n # Note: the lack of a --confirm or --reason flag on this script is deliberate.\n # It adds a bit of friction to the process, so somebody can't accidentally\n # invoke this script and delete a whole pile of bags at once.\n _confirm_user_wants_to_delete_bag(\n api_name=api_name,\n space=space,\n external_identifier=external_identifier,\n version=version,\n date_created=date_created,\n )\n\n reason = _ask_reason_for_deleting_bag(\n space=space, external_identifier=external_identifier, version=version\n )\n\n # Do various other checks that this bag is correctly formed before we go\n # ahead and start deleting stuff. 
The deletion should be as close to atomic\n    # as possible -- we shouldn't get halfway through deleting the bag and then\n    # discover something is wrong; we should catch problems upfront.\n    _confirm_is_latest_version_of_bag(\n        storage_client,\n        space=space,\n        external_identifier=external_identifier,\n        version=version,\n    )\n\n    bag = storage_client.get_bag(\n        space=space, external_identifier=external_identifier, version=version\n    )\n\n    # We only want to delete files that were newly introduced in this version --\n    # we shouldn't delete files fetch.txt'd from a prior version.\n    files_to_delete = [\n        f\n        for f in bag[\"manifest\"][\"files\"] + bag[\"tagManifest\"][\"files\"]\n        if f[\"path\"].startswith(f\"{version}/\")\n    ]\n\n    locations = _confirm_user_wants_to_delete_locations(bag)\n    assert all(\n        loc[\"prefix\"].endswith(f\"/{version}\")\n        for loc in locations[\"s3\"] + [locations[\"azure\"]]\n    )\n\n    environment = api_name\n    assert environment in (\"prod\", \"staging\")\n\n    dynamo_client = create_dynamo_client_from_role_arn(role_arn=READ_ONLY_ROLE_ARN)\n\n    items_to_delete = list(\n        _get_dynamodb_items_to_delete(\n            dynamo_client,\n            environment=api_name,\n            ingest_id=ingest_id,\n            space=space,\n            external_identifier=external_identifier,\n            version=version,\n            files_to_delete=files_to_delete,\n            azure_location=locations[\"azure\"],\n        )\n    )\n\n    click.echo(\"\")\n    click.echo(\n        \"Creating a temporary backup copy of the bag in s3://wellcomecollection-storage-infra...\"\n    )\n    s3.copy_s3_prefix(\n        create_aws_client_from_role_arn(\"s3\", role_arn=DEV_ROLE_ARN),\n        src_bucket=locations[\"s3\"][0][\"bucket\"],\n        src_prefix=locations[\"s3\"][0][\"prefix\"],\n        dst_bucket=\"wellcomecollection-storage-infra\",\n        # Note: we drop the version and external identifier. The version is\n        # a property of the storage service; the external identifier is part of\n        # the bag-info.txt.\n        dst_prefix=f\"tmp/deleted_bags/{space}/{ingest_id}\",\n    )\n\n    if not skip_azure_login:\n        click.echo(\"\")\n        click.echo(\"Logging in to Azure...\")\n        azure.az(\"login\")\n\n    # At this point, we've checked everything -- we're good to go! 
Let's\n    # make a record of the deletion we're about to do.\n    dynamo_client = create_dynamo_client_from_role_arn(role_arn=DEV_ROLE_ARN)\n    _record_deletion(\n        dynamo_client,\n        environment=api_name,\n        ingest_id=ingest_id,\n        space=space,\n        external_identifier=external_identifier,\n        version=version,\n        reason=reason,\n    )\n\n    # And now start deleting stuff!\n    _delete_reporting_cluster_entries(\n        environment=environment,\n        ingest_id=ingest_id,\n        space=space,\n        external_identifier=external_identifier,\n        version=version,\n        s3_location=locations[\"s3\"][0],\n        files_to_delete=files_to_delete,\n    )\n\n    _delete_s3_objects(s3_locations=locations[\"s3\"])\n    _delete_azure_blobs(azure_location=locations[\"azure\"])\n\n    _delete_dynamodb_items(items_to_delete)\n\n    click.echo(\"\")\n    click.echo(\n        click.style(\n            textwrap.dedent(\n                \"\"\"\n                This bag has been deleted.\n\n                A temporary copy has been saved in s3://wellcomecollection-platform-infra,\n                but this will only be kept for 30 days.\n                \"\"\"\n            ).strip(),\n            \"red\",\n        )\n    )\n\n\ndef hilight(s):\n    return click.style(str(s), \"green\")\n\n\ndef abort(msg):\n    sys.exit(click.style(msg, \"red\"))\n\n\ndef _confirm_user_wants_to_delete_bag(\n    api_name, space, external_identifier, version, date_created\n):\n    \"\"\"\n    Show the user some information about the bag, and check this is really\n    the bag they want to delete.\n\n    It presents a prompt of the following form:\n\n        This is the bag you are about to delete:\n        Environment: prod\n        Space:       testing\n        External ID: test_bag\n        Version:     v132\n        Date created: Friday, 13 November 2020 @ 11:10 (2 days ago)\n\n        Are you sure you want to delete this bag? [y/N]: y\n\n    \"\"\"\n    delta = humanize.naturaltime(date_created)\n\n    click.echo(\"\")\n    click.echo(\"This is the bag you are about to delete:\")\n    click.echo(f\"Environment: {hilight(api_name)}\")\n    click.echo(f\"Space:       {hilight(space)}\")\n    click.echo(f\"External ID: {hilight(external_identifier)}\")\n    click.echo(f\"Version:     {hilight(version)}\")\n\n    date_str = date_created.strftime(\"%A, %d %B %Y @ %H:%M\") + \" (%s)\" % delta\n    click.echo(f\"Date created: {hilight(date_str)}\")\n\n    click.echo(\"\")\n    click.confirm(\"Are you sure you want to delete this bag?\", abort=True)\n\n\ndef _ask_reason_for_deleting_bag(*, space, external_identifier, version):\n    \"\"\"\n    Ask the user why they want to delete this bag. This reason will be recorded\n    for audit purposes.\n    \"\"\"\n    click.echo(\"\")\n    bag_id = f\"{space}/{external_identifier}/{version}\"\n    return click.prompt(f\"Why are you deleting {hilight(bag_id)}?\")\n\n\ndef _confirm_is_latest_version_of_bag(\n    storage_client, *, space, external_identifier, version\n):\n    \"\"\"\n    It's possible for a later version of a bag to refer to an earlier version\n    of the bag in the fetch.txt. Rather than exhaustively check for back-references\n    in future versions, we simply refuse to delete any bag which isn't the latest.\n\n    This also avoids creating \"holes\" in the versioning. 
If the latest version is N,\n then you'd expect there also to be versions 1, 2, ..., N - 1 with no gaps.\n\n \"\"\"\n latest_bag = storage_client.get_bag(\n space=space, external_identifier=external_identifier\n )\n latest_version = latest_bag[\"version\"]\n\n bag_id = f\"{space}/{external_identifier}\"\n\n click.echo(\"\")\n\n version_i = int(version[1:])\n latest_version_i = int(latest_version[1:])\n\n click.echo(\"Checking this is the latest version...\")\n if version_i == latest_version_i:\n click.echo(f\"{version} is the latest version of {bag_id}\")\n elif version_i < latest_version_i:\n abort(\n f\"The latest version of {bag_id} is {latest_version}, \"\n f\"which is newer than {version}\"\n )\n elif version_i > latest_version_i:\n abort(\n f\"Something is wrong -- the bags API only knows about {latest_version}, \"\n f\"which is older than {version}\"\n )\n\n\ndef _confirm_user_wants_to_delete_locations(bag):\n \"\"\"\n Now get the list of locations/prefixes we're going to delete from permanent\n storage. This is another place for the user to intervene if something seems\n wrong. It presents a prompt of the following form:\n\n This bag is stored in 3 locations:\n - s3://wc-storage-staging/testing/test_bag/v132\n - s3://wc-storage-staging-replica-ireland/testing/test_bag/v132\n - azure://wc-storage-staging-replica-netherlands/testing/test_bag/v132\n\n Does this look right? [y/N]: y\n\n \"\"\"\n click.echo(\"\")\n click.echo(\"Checking the locations to delete...\")\n\n location = bag[\"location\"]\n if location[\"provider\"][\"id\"] != \"amazon-s3\":\n abort(\n \"Something is wrong: the primary location of the bag isn't S3:\\n\"\n + json.dumps(location, indent=2, sort_keys=True)\n )\n\n replicas = bag[\"replicaLocations\"]\n replica_providers = {r[\"provider\"][\"id\"] for r in replicas}\n\n if len(replicas) != 2 or replica_providers != {\"amazon-s3\", \"azure-blob-storage\"}:\n abort(\n \"This script only knows how to deal with exactly two replicas: one S3, \"\n f\"one Azure. This bag has {len(replicas)}:\\n\"\n + json.dumps(replicas, indent=2, sort_keys=True)\n )\n\n s3_replica = next(r for r in replicas if r[\"provider\"][\"id\"] == \"amazon-s3\")\n azure_replica = next(\n r for r in replicas if r[\"provider\"][\"id\"] == \"azure-blob-storage\"\n )\n\n # All the paths should be the same in each bucket/container; if they're not,\n # something is dodgy about this bag.\n prefixes = {location[\"path\"], s3_replica[\"path\"], azure_replica[\"path\"]}\n if len(prefixes) != 1:\n abort(\n \"All the replicas should have the same prefix. 
This bag has different \"\n \"prefixes in different buckets/containers:\\n\"\n + json.dumps(\n {\"location\": location, \"replicas\": replicas}, indent=2, sort_keys=True\n )\n )\n\n # The 'prefix' returned in the locations block on a bag refers to *all* versions\n # of a bag, but when doing deletions we only want to delete a single version.\n common_prefix = os.path.join(prefixes.pop(), bag[\"version\"])\n\n try:\n azure_account = {\n \"wellcomecollection-storage-staging-replica-netherlands\": \"wecostoragestage\",\n \"wellcomecollection-storage-replica-netherlands\": \"wecostorageprod\",\n }[azure_replica[\"bucket\"]]\n except KeyError:\n abort(\n \"Unrecognised Azure container in the Azure replica location:\\n\"\n + json.dumps(azure_replica, indent=2, sort_keys=True)\n )\n\n locations = {\n \"s3\": [\n {\"bucket\": location[\"bucket\"], \"prefix\": common_prefix},\n {\"bucket\": s3_replica[\"bucket\"], \"prefix\": common_prefix},\n ],\n \"azure\": {\n \"account\": azure_account,\n \"container\": azure_replica[\"bucket\"],\n \"prefix\": common_prefix,\n },\n }\n\n loc_uris = [f\"s3://{loc['bucket']}/{loc['prefix']}\" for loc in locations[\"s3\"]] + [\n f\"azure://{locations['azure']['container']}/{locations['azure']['prefix']}\"\n ]\n\n click.echo(f\"This bag is stored in {hilight(len(loc_uris))} locations:\")\n for loc in loc_uris:\n click.echo(f\"- {hilight(loc)}\")\n click.echo(\"\")\n click.confirm(\"Does this look right?\", abort=True)\n\n return locations\n\n\nDynamoItem = collections.namedtuple(\"DynamoItem\", [\"table\", \"key\"])\n\n\ndef _get_dynamodb_items_to_delete(\n dynamo_client,\n *,\n environment,\n ingest_id,\n space,\n external_identifier,\n version,\n files_to_delete,\n azure_location,\n):\n \"\"\"\n Returns all the DynamoDB items that should be deleted, as a list of tuples\n\n (table, key)\n\n \"\"\"\n table_names = set(dynamo.list_dynamo_tables(dynamo_client))\n table_prefix = \"storage\" if environment == \"prod\" else f\"storage-{environment}\"\n\n # All the tags from the Azure verifier table. There should be one item\n # per file. Note: these will eventually be removed when Azure get supported\n # for index tags directly on blobs.\n #\n # This information can be reconstructed by running the verifier over the\n # Azure replica.\n assert all(\n f[\"path\"].startswith(f\"{version}/\") for f in files_to_delete\n ), files_to_delete\n assert azure_location[\"prefix\"].endswith(f\"/{version}\")\n\n azure_table_name = f\"storage-{environment}_azure_verifier_tags\"\n assert azure_table_name in table_names\n\n for f in files_to_delete:\n azure_uri = f\"azure://{azure_location['container']}/{azure_location['prefix']}/{f['name']}\"\n yield DynamoItem(table=azure_table_name, key={\"id\": azure_uri})\n\n # The replicas table. We delete this next -- this information can be\n # reconstructed by running the verifier over the replicas.\n replicas_table_name = f\"{table_prefix}_replicas_table\"\n assert replicas_table_name in table_names\n\n yield DynamoItem(\n table=replicas_table_name,\n key={\"id\": f\"{space}/{external_identifier}/{version}\"},\n )\n\n # The versions table. 
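A runnable sketch of the per-file key construction used by `_get_dynamodb_items_to_delete`, with hypothetical location and file values:

```python
import collections

DynamoItem = collections.namedtuple("DynamoItem", ["table", "key"])

azure_location = {"container": "wc-storage-replica-netherlands",
                  "prefix": "testing/test_bag/v2"}
files_to_delete = [{"name": "data/alto/page_0001.xml"}]

def azure_tag_items(table_name, azure_location, files_to_delete):
    # One verifier-tag item per file, keyed by the blob's azure:// URI.
    for f in files_to_delete:
        uri = f"azure://{azure_location['container']}/{azure_location['prefix']}/{f['name']}"
        yield DynamoItem(table=table_name, key={"id": uri})

items = list(azure_tag_items("storage-staging_azure_verifier_tags",
                             azure_location, files_to_delete))
assert items[0].key["id"].endswith("/v2/data/alto/page_0001.xml")
```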
This information can be reconstructed; at this point\n # most references to this version of the bag have been deleted.\n versions_table_name = f\"{table_prefix}_versioner_versions_table\"\n assert versions_table_name in table_names\n\n yield DynamoItem(\n table=versions_table_name,\n key={\"id\": f\"{space}/{external_identifier}\", \"version\": int(version[1:])},\n )\n\n # The storage manifest table.\n manifests_table_name = dynamo.find_manifests_dynamo_table(\n dynamo_client, table_prefix=table_prefix\n )\n assert manifests_table_name in table_names\n\n yield DynamoItem(\n table=manifests_table_name,\n key={\"id\": f\"{space}/{external_identifier}\", \"version\": int(version[1:])},\n )\n\n # The ingests table\n ingests_table_name = f\"{table_prefix}-ingests\"\n yield DynamoItem(table=ingests_table_name, key={\"id\": ingest_id})\n\n\ndef _record_deletion(\n dynamo_client,\n *,\n environment,\n ingest_id,\n space,\n external_identifier,\n version,\n reason,\n):\n \"\"\"\n Create a record of this deletion event in DynamoDB.\n \"\"\"\n event = {\n \"requested_by\": get_underlying_role_arn(),\n \"deleted_at\": datetime.datetime.now().isoformat(),\n \"reason\": reason,\n }\n\n dynamo_client.update_item(\n TableName=\"deleted_bags\",\n Key={\"ingest_id\": ingest_id},\n UpdateExpression=\"\"\"\n SET\n #space = :space,\n #externalIdentifier = :externalIdentifier,\n #version = :version,\n #environment = :environment,\n #events = list_append(if_not_exists(#events, :empty_list), :event)\n \"\"\",\n ExpressionAttributeNames={\n \"#environment\": \"environment\",\n \"#space\": \"space\",\n \"#externalIdentifier\": \"externalIdentifier\",\n \"#version\": \"version\",\n \"#events\": \"events\",\n },\n ExpressionAttributeValues={\n \":environment\": environment,\n \":space\": space,\n \":externalIdentifier\": external_identifier,\n \":version\": version,\n \":event\": [event],\n \":empty_list\": [],\n },\n )\n\n\ndef _delete_reporting_cluster_entries(\n *,\n environment,\n ingest_id,\n space,\n external_identifier,\n version,\n s3_location,\n files_to_delete,\n):\n click.echo(\"\")\n click.echo(\"Deleting entries in the reporting cluster...\")\n\n index_pattern = {\"staging\": \"storage_stage_{doc}\", \"prod\": \"storage_{doc}\"}[\n environment\n ]\n\n secrets_client = create_aws_client_from_role_arn(\n \"secretsmanager\", role_arn=ADMIN_ROLE_ARN\n )\n\n # Delete the ingest.\n #\n # In particular, delete the entry in the ingests index with this ingest ID.\n #\n click.echo(\"Deleting this ingest from the ingests index...\")\n ingests_reporting_client = get_reporting_client(\n secrets_client, environment=environment, app_name=\"ingests\"\n )\n try:\n ingests_reporting_client.delete(\n index=index_pattern.format(doc=\"ingests\"), id=ingest_id\n )\n except ElasticNotFoundError:\n pass\n\n # Delete the bag.\n #\n # In particular, delete the entry in the bags index with the ID\n # {space}/{external_identifier}\n #\n # Note: this assumes a single version of each bag will be indexed. 
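The `list_append(if_not_exists(...))` idiom in `_record_deletion` creates the audit list on the first write and appends on every later one. A sketch that just builds the `update_item` kwargs (no AWS call), with hypothetical values:

```python
import datetime

def build_deletion_event_update(ingest_id, reason, requested_by):
    event = {
        "requested_by": requested_by,
        "deleted_at": datetime.datetime.now().isoformat(),
        "reason": reason,
    }
    return {
        "TableName": "deleted_bags",
        "Key": {"ingest_id": ingest_id},
        # if_not_exists seeds an empty list on the first deletion event;
        # list_append preserves earlier events on re-runs.
        "UpdateExpression": "SET #events = list_append(if_not_exists(#events, :empty), :event)",
        "ExpressionAttributeNames": {"#events": "events"},
        "ExpressionAttributeValues": {":event": [event], ":empty": []},
    }

kwargs = build_deletion_event_update("ingest-123", "test deletion",
                                     "arn:aws:iam::000000000000:role/dev")
assert kwargs["ExpressionAttributeValues"][":event"][0]["reason"] == "test deletion"
```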
If we change\n # this, we'll need to change this code to match.\n #\n click.echo(\"Deleting this bag from the bags index...\")\n bags_reporting_client = get_reporting_client(\n secrets_client, environment=environment, app_name=\"bags\"\n )\n try:\n bags_reporting_client.delete(\n index=index_pattern.format(doc=\"bags\"), id=f\"{space}/{external_identifier}\"\n )\n except ElasticNotFoundError:\n pass\n\n # If this wasn't v1 of the bag, trigger a reindex so we get the bag in the\n # reporting cluster.\n if version != \"v1\":\n version_i = int(version[1:])\n\n reindexer_topic_arn = f\"arn:aws:sns:eu-west-1:{ACCOUNT_ID}:storage_{environment}_bag_reindexer_output\"\n sns_client = create_aws_client_from_role_arn(\"sns\", role_arn=DEV_ROLE_ARN)\n\n payload = {\n \"space\": space,\n \"externalIdentifier\": external_identifier,\n \"version\": f\"v{version_i - 1}\",\n \"type\": \"RegisteredBagNotification\",\n }\n\n sns_client.publish(\n TopicArn=reindexer_topic_arn,\n Subject=f\"Sent by {__file__}\",\n Message=json.dumps(payload),\n )\n\n # Delete all the files.\n #\n # In particular, delete all files whose key matches the common prefix\n # in all our locations.\n #\n click.echo(\"Deleting all the matching files from the files index...\")\n assert all(f[\"path\"].startswith(f\"{version}/\") for f in files_to_delete)\n files_reporting_client = get_reporting_client(\n secrets_client, environment=environment, app_name=\"files\"\n )\n\n for f in files_to_delete:\n try:\n files_reporting_client.delete(\n index=index_pattern.format(doc=\"files\"),\n id=f\"{s3_location['prefix']}/{f['name']}\",\n )\n except ElasticNotFoundError:\n pass\n\n\ndef _delete_s3_objects(*, s3_locations):\n click.echo(\"\")\n click.echo(\"Deleting objects from S3...\")\n # Now get AWS credentials to delete the S3 objects from the storage service.\n # Our standard storage-dev and storage-admin roles have a blanket Deny\n # on anything in the live storage service, so we'll have to create a one-off\n # user with the exact set of permissions we need.\n #\n # Creating a user with fine-grained permissions is an attempt to reduce the\n # risk of programming errors elsewhere screwing up the storage service -- if\n # the code gets overzealous and tries to delete extra stuff, the permissions\n # will protect us.\n policy_document = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": f\"DeleteInBucket{i}\",\n \"Effect\": \"Allow\",\n \"Action\": [\"s3:DeleteObject\"],\n \"Resource\": [f\"arn:aws:s3:::{loc['bucket']}/{loc['prefix']}*\"],\n }\n for i, loc in enumerate(s3_locations)\n ]\n + [\n {\n \"Sid\": \"ListAll\",\n \"Effect\": \"Allow\",\n \"Action\": [\"s3:List*\"],\n \"Resource\": [\"*\"],\n }\n ],\n }\n\n with temporary_iam_credentials(\n admin_role_arn=ADMIN_ROLE_ARN, policy_document=policy_document\n ) as credentials:\n s3_list_client = create_aws_client_from_role_arn(\n \"s3\", role_arn=READ_ONLY_ROLE_ARN\n )\n s3_delete_client = create_aws_client_from_credentials(\n \"s3\", credentials=credentials\n )\n\n for loc in s3_locations:\n click.echo(f\"Deleting objects in s3://{loc['bucket']}/{loc['prefix']}\")\n delete_s3_prefix(\n s3_list_client=s3_list_client,\n s3_delete_client=s3_delete_client,\n bucket=loc[\"bucket\"],\n prefix=loc[\"prefix\"],\n )\n\n\ndef _delete_azure_blobs(*, azure_location):\n click.echo(\"\")\n click.echo(\"Deleting blobs from Azure...\")\n\n click.echo(\n f\"Deleting blobs in azure://{azure_location['container']}/{azure_location['prefix']}\"\n )\n\n with azure.unlocked_azure_container(\n 
account=azure_location[\"account\"], container=azure_location[\"container\"]\n ):\n azure.delete_azure_prefix(\n account=azure_location[\"account\"],\n container=azure_location[\"container\"],\n prefix=azure_location[\"prefix\"],\n )\n\n\ndef _delete_dynamodb_items(items_to_delete):\n click.echo(\"\")\n click.echo(\"Deleting items from DynamoDB...\")\n\n # Now get AWS credentials to delete the DynamoDB items from the storage service.\n\n # The Azure verifier tags table may have arbitrarily many items, and the\n # entire table can be reconstructed, so it doesn't have delete protections.\n azure_verifier_tags = [\n it for it in items_to_delete if it.table.endswith(\"_azure_verifier_tags\")\n ]\n\n remaining_items = [\n it for it in items_to_delete if not it.table.endswith(\"_azure_verifier_tags\")\n ]\n\n assert len(azure_verifier_tags) + len(remaining_items) == len(items_to_delete)\n\n dynamo_client = create_dynamo_client_from_role_arn(role_arn=DEV_ROLE_ARN)\n\n # The table should be unique\n assert len({it.table for it in azure_verifier_tags}) == 1\n\n # there should always be some files, so this [0] will never throw an IndexError\n azure_verifier_tags_table = azure_verifier_tags[0].table\n\n dynamo.bulk_delete_dynamo_items(\n dynamo_client,\n table_name=azure_verifier_tags_table,\n keys=[it.key for it in azure_verifier_tags],\n )\n\n # Now go through the remaining items. There should only be a few; enough\n # that we can delete them all in one batch.\n policy_document = {\n \"Version\": \"2012-10-17\",\n \"Statement\": [\n {\n \"Sid\": f\"DeleteItem{i}\",\n \"Effect\": \"Allow\",\n \"Action\": [\"dynamodb:DeleteItem\"],\n \"Resource\": [\n f\"arn:aws:dynamodb:eu-west-1:{ACCOUNT_ID}:table/{item.table}\"\n ],\n \"Condition\": {\n \"ForAllValues:StringEquals\": {\n \"dynamodb:LeadingKeys\": [item.key[\"id\"]]\n }\n },\n }\n for i, item in enumerate(remaining_items)\n ],\n }\n\n with temporary_iam_credentials(\n admin_role_arn=ADMIN_ROLE_ARN, policy_document=policy_document\n ) as credentials:\n dynamo_client = boto3.resource(\n \"dynamodb\",\n aws_access_key_id=credentials[\"AccessKeyId\"],\n aws_secret_access_key=credentials[\"SecretAccessKey\"],\n aws_session_token=credentials[\"SessionToken\"],\n ).meta.client\n\n for item in remaining_items:\n dynamo_client.delete_item(TableName=item.table, Key=item.key)\n\n\nif __name__ == \"__main__\":\n main()\n", "sub_path": "scripts/ss_delete_bag.py", "file_name": "ss_delete_bag.py", "file_ext": "py", "file_size_in_byte": 24388, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "helpers.storage_service.lookup_ingest", "line_number": 55, "usage_type": "call"}, {"api_name": "wellcome_storage_service.IngestNotFound", "line_number": 56, "usage_type": "name"}, {"api_name": "helpers.iam.create_dynamo_client_from_role_arn", "line_number": 113, "usage_type": "call"}, {"api_name": "helpers.iam.READ_ONLY_ROLE_ARN", "line_number": 113, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 128, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 129, "usage_type": "call"}, {"api_name": "helpers.s3.copy_s3_prefix", "line_number": 132, "usage_type": "call"}, {"api_name": "helpers.s3", "line_number": 132, "usage_type": "name"}, {"api_name": "helpers.iam.create_aws_client_from_role_arn", "line_number": 133, "usage_type": "call"}, {"api_name": "helpers.iam.DEV_ROLE_ARN", "line_number": 133, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 144, 
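The one-off delete policy in `_delete_s3_objects` is worth isolating: it grants `s3:DeleteObject` on exactly the doomed prefixes and nothing else. A sketch that builds (but does not apply) such a policy document, with placeholder bucket names:

```python
import json

def delete_only_policy(s3_locations):
    # Least-privilege sketch mirroring the policy above: one DeleteObject
    # statement per bucket/prefix pair, scoped by a trailing wildcard.
    return {
        "Version": "2012-10-17",
        "Statement": [
            {
                "Sid": f"DeleteInBucket{i}",
                "Effect": "Allow",
                "Action": ["s3:DeleteObject"],
                "Resource": [f"arn:aws:s3:::{loc['bucket']}/{loc['prefix']}*"],
            }
            for i, loc in enumerate(s3_locations)
        ],
    }

policy = delete_only_policy([{"bucket": "wc-storage", "prefix": "testing/test_bag/v2"}])
print(json.dumps(policy, indent=2))
```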
"usage_type": "call"}, {"api_name": "click.echo", "line_number": 145, "usage_type": "call"}, {"api_name": "helpers.azure.az", "line_number": 146, "usage_type": "call"}, {"api_name": "helpers.azure", "line_number": 146, "usage_type": "name"}, {"api_name": "helpers.iam.create_dynamo_client_from_role_arn", "line_number": 150, "usage_type": "call"}, {"api_name": "helpers.iam.DEV_ROLE_ARN", "line_number": 150, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 177, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 178, "usage_type": "call"}, {"api_name": "click.style", "line_number": 179, "usage_type": "call"}, {"api_name": "textwrap.dedent", "line_number": 180, "usage_type": "call"}, {"api_name": "click.command", "line_number": 40, "usage_type": "call"}, {"api_name": "click.argument", "line_number": 41, "usage_type": "call"}, {"api_name": "click.option", "line_number": 42, "usage_type": "call"}, {"api_name": "click.style", "line_number": 194, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 198, "usage_type": "call"}, {"api_name": "click.style", "line_number": 198, "usage_type": "call"}, {"api_name": "humanize.naturaltime", "line_number": 220, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 222, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 223, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 224, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 225, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 226, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 227, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 230, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 232, "usage_type": "call"}, {"api_name": "click.confirm", "line_number": 233, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 241, "usage_type": "call"}, {"api_name": "click.prompt", "line_number": 243, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 265, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 270, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 272, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 299, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 300, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 306, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 316, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 331, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 338, "usage_type": "call"}, {"api_name": "os.path", "line_number": 338, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 348, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 367, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 369, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 370, "usage_type": "call"}, {"api_name": "click.confirm", "line_number": 371, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 376, "usage_type": "call"}, {"api_name": "helpers.dynamo.list_dynamo_tables", "line_number": 396, "usage_type": "call"}, {"api_name": "helpers.dynamo", "line_number": 396, "usage_type": "name"}, {"api_name": "helpers.dynamo.find_manifests_dynamo_table", "line_number": 438, "usage_type": "call"}, {"api_name": "helpers.dynamo", "line_number": 438, "usage_type": "name"}, {"api_name": "helpers.iam.get_underlying_role_arn", "line_number": 467, 
"usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 468, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 468, "usage_type": "attribute"}, {"api_name": "click.echo", "line_number": 511, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 512, "usage_type": "call"}, {"api_name": "helpers.iam.create_aws_client_from_role_arn", "line_number": 518, "usage_type": "call"}, {"api_name": "helpers.iam.ADMIN_ROLE_ARN", "line_number": 519, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 526, "usage_type": "call"}, {"api_name": "helpers.reporting.get_reporting_client", "line_number": 527, "usage_type": "call"}, {"api_name": "elasticsearch.exceptions.NotFoundError", "line_number": 534, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 545, "usage_type": "call"}, {"api_name": "helpers.reporting.get_reporting_client", "line_number": 546, "usage_type": "call"}, {"api_name": "elasticsearch.exceptions.NotFoundError", "line_number": 553, "usage_type": "name"}, {"api_name": "helpers.iam.ACCOUNT_ID", "line_number": 561, "usage_type": "name"}, {"api_name": "helpers.iam.create_aws_client_from_role_arn", "line_number": 562, "usage_type": "call"}, {"api_name": "helpers.iam.DEV_ROLE_ARN", "line_number": 562, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 574, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 582, "usage_type": "call"}, {"api_name": "helpers.reporting.get_reporting_client", "line_number": 584, "usage_type": "call"}, {"api_name": "elasticsearch.exceptions.NotFoundError", "line_number": 594, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 599, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 600, "usage_type": "call"}, {"api_name": "helpers.iam.temporary_iam_credentials", "line_number": 631, "usage_type": "call"}, {"api_name": "helpers.iam.ADMIN_ROLE_ARN", "line_number": 632, "usage_type": "name"}, {"api_name": "helpers.iam.create_aws_client_from_role_arn", "line_number": 634, "usage_type": "call"}, {"api_name": "helpers.iam.READ_ONLY_ROLE_ARN", "line_number": 635, "usage_type": "name"}, {"api_name": "helpers.iam.create_aws_client_from_credentials", "line_number": 637, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 642, "usage_type": "call"}, {"api_name": "helpers.s3.delete_s3_prefix", "line_number": 643, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 652, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 653, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 655, "usage_type": "call"}, {"api_name": "helpers.azure.unlocked_azure_container", "line_number": 659, "usage_type": "call"}, {"api_name": "helpers.azure", "line_number": 659, "usage_type": "name"}, {"api_name": "helpers.azure.delete_azure_prefix", "line_number": 662, "usage_type": "call"}, {"api_name": "helpers.azure", "line_number": 662, "usage_type": "name"}, {"api_name": "click.echo", "line_number": 670, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 671, "usage_type": "call"}, {"api_name": "helpers.iam.create_dynamo_client_from_role_arn", "line_number": 687, "usage_type": "call"}, {"api_name": "helpers.iam.DEV_ROLE_ARN", "line_number": 687, "usage_type": "name"}, {"api_name": "helpers.dynamo.bulk_delete_dynamo_items", "line_number": 695, "usage_type": "call"}, {"api_name": "helpers.dynamo", "line_number": 695, "usage_type": "name"}, {"api_name": "helpers.iam.ACCOUNT_ID", "line_number": 711, "usage_type": 
"name"}, {"api_name": "helpers.iam.temporary_iam_credentials", "line_number": 723, "usage_type": "call"}, {"api_name": "helpers.iam.ADMIN_ROLE_ARN", "line_number": 724, "usage_type": "name"}, {"api_name": "boto3.resource", "line_number": 726, "usage_type": "call"}]} +{"seq_id": "581759263", "text": "import configparser\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n\nclass Configuration:\n default_server_ip = 'localhost'\n default_server_port = 8000\n\n def __init__(self, filename='config.cfg'):\n self.config = configparser.ConfigParser()\n self.config.read(filename)\n\n @property\n def server_ip(self):\n try:\n return self.config.get('server', 'ip')\n except (configparser.NoSectionError, configparser.NoOptionError):\n logger.warn(\n 'No IP specified for the server in the configuration, using {}'.format(self.default_server_ip))\n return self.default_server_ip\n\n @property\n def server_port(self):\n try:\n return self.config.getint('server', 'port')\n except (configparser.NoSectionError, configparser.NoOptionError):\n logger.warn(\n 'No port specified for the server in the configuration, using {}'.format(self.default_server_port))\n return self.default_server_port\n", "sub_path": "lab2/common/configuration.py", "file_name": "configuration.py", "file_ext": "py", "file_size_in_byte": 1031, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 4, "usage_type": "call"}, {"api_name": "configparser.ConfigParser", "line_number": 12, "usage_type": "call"}, {"api_name": "configparser.NoSectionError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "configparser.NoOptionError", "line_number": 19, "usage_type": "attribute"}, {"api_name": "configparser.NoSectionError", "line_number": 28, "usage_type": "attribute"}, {"api_name": "configparser.NoOptionError", "line_number": 28, "usage_type": "attribute"}]} +{"seq_id": "427080469", "text": "import json\nimport socket\nimport sys\nimport subprocess\n\nUSE_CENTERED_MASTER = False\n\nULTRAWIDE = \"DP-2\"\nVERTICAL = \"DP-4\"\nSMALL = \"HDMI-A-1\"\n\nDEBUG = False\n\n\ndef info(cmd):\n with subprocess.Popen([\"hyprctl\", \"-j\", cmd], stdout=subprocess.PIPE) as proc:\n return json.loads(proc.stdout.read())\n\n\nIS_DESKTOP = socket.gethostname().endswith(\"desktop\")\n\n\ndef workspace_info(workspace):\n for wksp in info(\"workspaces\"):\n if wksp[\"name\"] == workspace:\n return wksp\n\n\ndef dispatch(*args):\n cmd = [\"hyprctl\", \"dispatch\", *args]\n if DEBUG:\n print(\"[DEBUG]\", cmd)\n subprocess.run(cmd)\n\n\ndef set_workspace_orientation(workspace):\n if not IS_DESKTOP:\n return\n\n wksp = workspace_info(workspace)\n\n if wksp[\"windows\"]:\n if wksp[\"monitor\"] == VERTICAL:\n dispatch(\"layoutmsg\", \"orientationtop\")\n elif wksp[\"monitor\"] == SMALL:\n dispatch(\"layoutmsg\", \"orientationleft\")\n elif USE_CENTERED_MASTER and wksp[\"monitor\"] == ULTRAWIDE:\n dispatch(\"layoutmsg\", \"orientationcenter\")\n\n\nif __name__ == \"__main__\":\n while 1:\n line = sys.stdin.readline()\n [ev, ev_args] = line.split(\">>\")\n ev_args = ev_args.strip().split(\",\")\n\n # print(\"[EVENT]\", ev)\n if ev == \"monitoradded\":\n if IS_DESKTOP:\n subprocess.run(\"hypr-monitors\")\n\n # always reset wallpaper and waybar\n # subprocess.run(\"hypr-wallpaper\", stdout=subprocess.DEVNULL)\n\n elif ev == \"monitorremoved\":\n # focus workspace on ultrawide\n if IS_DESKTOP:\n dispatch(\"focusmonitor\", ULTRAWIDE)\n\n # elif ev == 
\"workspace\":\n # [workspace] = ev_args\n elif ev == \"openwindow\":\n [win_id, workspace, *_] = ev_args\n set_workspace_orientation(workspace)\n elif ev == \"movewindow\":\n [win_id, workspace] = ev_args\n set_workspace_orientation(workspace)\n # elif ev == \"closewindow\":\n # [win_id] = ev_args\n # elif ev == \"focusedmon\":\n # [mon, workspace] = ev_args\n\n else:\n # print(ev, ev_args)\n pass\n", "sub_path": "home-manager/hyprland/ipc.py", "file_name": "ipc.py", "file_ext": "py", "file_size_in_byte": 2192, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "subprocess.Popen", "line_number": 16, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 16, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 17, "usage_type": "call"}, {"api_name": "socket.gethostname", "line_number": 20, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 33, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 53, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 53, "usage_type": "attribute"}, {"api_name": "subprocess.run", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "187597454", "text": "import sys\nfrom os import path\nsys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )\n\nfrom spacy.tokens import Doc\nimport spacy\nfrom tokenization import BasicTokenizer\n\ndef my_tokenizer(text):\n bert_tokens=basic_tokenizer.tokenize(text) \n return Doc(nlp.vocab,words=bert_tokens)\n\nnlp=spacy.load('en_core_web_lg')\nnlp.tokenizer=my_tokenizer\nnever_split=(\"[UNK]\", \"[SEP]\", \"[PAD]\", \"[CLS]\", \"[MASK]\")\nbasic_tokenizer = BasicTokenizer(do_lower_case=True,\n never_split=never_split)\n\ntext='The switches between clarity and intoxication gave me a headache, but at least the silver-haired faery’s explanation of the queens’ “gifts” helped me understand why I could want to wrap my legs around a creature who terrified me.'\n\nspacy_doc=nlp(text)\n\nspacy_tokens=[(t.i,\n # t.text,\n # t.head.text,\n t.head.i) for t in spacy_doc]\n\n# for token in spacy_doc:\n# print(token.text, token.dep_, token.head.text, token.head.pos_,\n# [child for child in token.children])\n\n# for chunk in spacy_doc.noun_chunks:\n# print(chunk.text)#, chunk.root.text, chunk.root.dep_,\n #chunk.root.head.text)", "sub_path": "PyTorch/LanguageModeling/BERT/scratch/test_dep_parser.py", "file_name": "test_dep_parser.py", "file_ext": "py", "file_size_in_byte": 1209, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 3, "usage_type": "call"}, {"api_name": "os.path", "line_number": 3, "usage_type": "name"}, {"api_name": "os.path.abspath", "line_number": 3, "usage_type": "call"}, {"api_name": "spacy.tokens.Doc", "line_number": 11, "usage_type": "call"}, {"api_name": "spacy.load", "line_number": 13, "usage_type": "call"}, {"api_name": "tokenization.BasicTokenizer", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "548093395", "text": "from django.conf.urls import url\n\nfrom . 
import views\nfrom django.contrib.auth import views as auth_views\n\nurlpatterns = [\n    url(r'^register/$', views.register_page, name=\"register\"),\n    url(r'^login/$', views.login_view ,name=\"login\"),\n    url(r'^logout/$', views.logout_page, name=\"logout\"),\n    url(r'^$', views.main_page, name=\"main page\"),\n    url(r'^profile/(?P<pk>[0-9]+)/$', views.edit_user, name='user_profile_edit')\n]\n", "sub_path": "accounts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 431, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.url", "line_number": 7, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}]} +{"seq_id": "499944497", "text": "\"\"\"\nProvides the function that fully processes a range of hours\n\"\"\"\n\nfrom datetime import timedelta\nimport pandas as pd\nfrom os import remove, listdir\nfrom os.path import isfile\n\n# Other required functions\nfrom linktools import get_hours_list, get_download_link\nfrom downloader import process_file\n\ndef process_range(start_time, end_time, append=False):\n    \"\"\"\n    Downloads and processes hourly Wikipedia page traffic stats within a given time frame\n    Backups are regularly saved to the hard drive in the form of csv files\n    append allows to pick up from a previously generated csv file\n    Inputs: start_time: 'YYYY-MM-DD-HH'\n            end_time: 'YYYY-MM-DD-HH'\n    \"\"\"\n\n    # create the list of all hours to be downloaded\n    hourslist = get_hours_list(start_time, end_time)\n    # format properly\n    start_time = hourslist[0]\n    end_time = hourslist[-1]\n    total_hours = len(hourslist)\n    skipped_hours = 0\n\n    # append data to most recent file if so desired\n    if append == True:\n        # get the most recent file in the folder\n        if isfile('error.log'):\n            # prevent picking up the error log\n            prev_filename = sorted(listdir())[-2]\n        else:\n            prev_filename = sorted(listdir())[-1]\n        # create a new hourslist based on the new file\n        start_time = prev_filename[-14:-10] + '-' + prev_filename[-10:-8] + '-' +\\\n                     prev_filename[-8:-6] + '-' + prev_filename[-6:-4]\n        end_time = end_time.strftime('%Y-%m-%d-%H')\n        # skip the first hour since that already exists on the hard drive\n        hourslist = get_hours_list(start_time, end_time)[1:]\n        start_time = hourslist[0]\n        end_time = hourslist[-1]\n        skipped_hours = total_hours - len(hourslist)\n    \n    # deal with one hour at a time\n    for hours_done, hour in enumerate(hourslist):\n        # create the link\n        download_link = get_download_link(hour)\n        # process the link and save the output to a data frame\n        try:\n            current_data = process_file(download_link)\n        except:\n            try:\n                # sometimes the file name has a '1' (second) in the end\n                download_link = list(download_link)\n                download_link[-4] = '1'\n                download_link = ''.join(download_link)\n                current_data = process_file(download_link)\n            except:\n                try:\n                    # sometimes the file name has a '5' (minute) in there\n                    download_link = list(download_link)\n                    download_link[-6] = '5'\n                    download_link = ''.join(download_link)\n                    current_data = process_file(download_link)\n                except:\n                    try:\n                        # sometimes the file name has a '15' (minute) in there\n                        download_link = list(download_link)\n                        download_link[-6] = '5'\n                        download_link[-7] = '1'\n                        download_link = 
''.join(download_link)\n current_data = process_file(download_link)\n except:\n try:\n # sometimes the file name has a '10' (minute) in there\n download_link = list(download_link)\n download_link[-7] = '1'\n download_link = ''.join(download_link)\n current_data = process_file(download_link)\n except:\n # out of ideas; make a note and move on\n with open('error.log', 'a') as text:\n problem_hour = hour.strftime('%Y-%m-%d-%H') + '\\n'\n text.write(problem_hour)\n continue\n\n # add a column containing the hours\n current_data['hour'] = hour\n \n # save the results to a csv\n filename = 'data_up_to' + hour.strftime('%Y%m%d%H') + '.csv'\n if (hour == start_time) and (append == False):\n current_data.to_csv(filename, index=False)\n del current_data\n else:\n # Load previous data\n try:\n prev_data = pd.read_csv(prev_filename)\n except:\n try:\n # in case the previous hour was skipped due to an error\n prev_hour = hour - timedelta(hours=2)\n prev_filename = 'data_up_to' + prev_hour.strftime('%Y%m%d%H') + '.csv'\n prev_data = pd.read_csv(prev_filename)\n except:\n # in case the previous two hours were skipped due to an error\n prev_hour = hour - timedelta(hours=3)\n prev_filename = 'data_up_to' + prev_hour.strftime('%Y%m%d%H') + '.csv'\n prev_data = pd.read_csv(prev_filename)\n # Append current data\n new_data = prev_data.append(current_data)\\\n .reset_index(drop=True)\\\n .sort_values(by=['language', 'type', 'hour'])\\\n .reset_index(drop=True)\n # save the csv\n new_data.to_csv(filename, index=False)\n # clean up\n del prev_data, current_data, new_data\n \n # save the current file name for later to append lated data to it\n prev_filename = filename\n \n # status update: How far are we?\n percent_done = (hours_done + skipped_hours + 1) / total_hours * 100\n pretty_current_hour = hour.strftime('%Y-%m-%d %H:%M')\n status_update = 'Just finished with {latest}. 
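The nested try/excepts above probe a fixed set of minute/second fields in the dump filename. Assuming links end in `MMSS.gz` (e.g. `...-100000.gz`), a flat loop over candidate tails expresses the same search; `process_file` is the project's own function:

```python
def candidate_links(hour_link):
    # The five tails match the single-character edits made above:
    # HH0000, HH0001, HH0500, HH1500 and HH1000.
    for tail in ("0000.gz", "0001.gz", "0500.gz", "1500.gz", "1000.gz"):
        yield hour_link[:-7] + tail

def fetch_first_available(hour_link, process_file):
    for link in candidate_links(hour_link):
        try:
            return process_file(link)
        except Exception:
            continue
    raise RuntimeError(f"no downloadable variant for {hour_link}")
```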
That\\'s {done} hour(s) so far ({perc}%).'\n status_print = status_update.format(latest=pretty_current_hour,\n done=hours_done + skipped_hours + 1,\n perc=round(percent_done, 1))\n print(status_print)\n\n\t # have a rolling backup\n # always keep the last 5 files created\n if hours_done > 5:\n delete_hour = hour - timedelta(hours=6)\n delete_data = 'data_up_to' + delete_hour.strftime('%Y%m%d%H') + '.csv'\n try:\n remove(delete_data)\n except:\n print('No data deleted')\n return 'DONE!!!'\n", "sub_path": "DataScripts/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6379, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "linktools.get_hours_list", "line_number": 24, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 34, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 38, "usage_type": "call"}, {"api_name": "linktools.get_hours_list", "line_number": 44, "usage_type": "call"}, {"api_name": "linktools.get_download_link", "line_number": 52, "usage_type": "call"}, {"api_name": "downloader.process_file", "line_number": 55, "usage_type": "call"}, {"api_name": "downloader.process_file", "line_number": 62, "usage_type": "call"}, {"api_name": "downloader.process_file", "line_number": 69, "usage_type": "call"}, {"api_name": "downloader.process_file", "line_number": 77, "usage_type": "call"}, {"api_name": "downloader.process_file", "line_number": 84, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 103, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 109, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 114, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 140, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 143, "usage_type": "call"}]} +{"seq_id": "39732578", "text": "import requests\r\nfrom pyquery import PyQuery as pq\r\nimport redis\r\n\r\ndef get_page(url):\r\n #ip = requests.get('http://api.xdaili.cn/xdaili-api//greatRecharge/getGreatIp?spiderId=6f5f8995823e4968ba6b7a52a0f5d406&orderno=YZ201891066037Hjtfv&returnType=1&count=1').text.strip(\"\\r\\n\")\r\n #proxies ={'http':'http://'+ip}\r\n #print(proxies)\r\n try:\r\n #定义头信息\r\n headers = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.81 Safari/537.36',\r\n }\r\n res = requests.get(url=url,headers=headers)\r\n if res.status_code == 200:\r\n return res.text\r\n else:\r\n return None\r\n except Exception as err:\r\n print(err)\r\n return None\r\n\r\ndef parse_page(html):\r\n #链接redis数据库\r\n link = redis.StrictRedis(host = '127.0.0.1',port=6379)\r\n\r\n doc = pq(html)\r\n items = doc('table.tagCol tr td a').items()\r\n for item in items:\r\n tag = item.attr('href')\r\n link.lpush('book:tag_urls',tag)\r\n\r\n\r\ndef main():\r\n url = 'https://book.douban.com/tag/?view=type&icn=index-sorttags-all'\r\n html = get_page(url)\r\n parse_page(html)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n", "sub_path": "douban.py", "file_name": "douban.py", "file_ext": "py", "file_size_in_byte": 1248, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", 
"line_number": 14, "usage_type": "call"}, {"api_name": "redis.StrictRedis", "line_number": 25, "usage_type": "call"}, {"api_name": "pyquery.PyQuery", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "580718778", "text": "#!.venv/bin/python\n\nimport os\nfrom flask import Flask, request, jsonify, make_response, json\nfrom app import app, db, models\nfrom app.models import Card\nfrom utils import cors_response\nfrom datetime import datetime\n\n#this api function has no authentication associated with it\n@app.route('/deku/api/messages/', methods=['GET', 'POST'])\ndef messages(user_id):\n if request.method == 'GET':\n user = models.User.query.get(int(user_id))\n if user:\n # Get messages\n messages = models.Message.query.filter(models.Message.to_id == user_id).all()\n return cors_response((jsonify(messages=[message.serialize for message in messages]), 200))\n else:\n return cors_response((\"User not found.\", 400))\n\n elif request.method == 'POST': \n poster_id = request.form.get('poster_id')\n message = request.form.get('message')\n message = models.Message(to_id=user_id,\n from_id=poster_id,\n message=message,\n timestamp=datetime.utcnow())\n\n db.session.add(message)\n db.session.commit()\n return cors_response((jsonify(message = message.serialize), 201))\n else:\n pass\n", "sub_path": "app/messages.py", "file_name": "messages.py", "file_ext": "py", "file_size_in_byte": 1271, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.request.method", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "app.models.User.query.get", "line_number": 14, "usage_type": "call"}, {"api_name": "app.models.User", "line_number": 14, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 14, "usage_type": "name"}, {"api_name": "app.models.Message.query.filter", "line_number": 17, "usage_type": "call"}, {"api_name": "app.models.Message", "line_number": 17, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 17, "usage_type": "name"}, {"api_name": "utils.cors_response", "line_number": 18, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 18, "usage_type": "call"}, {"api_name": "utils.cors_response", "line_number": 20, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "app.models.Message", "line_number": 25, "usage_type": "call"}, {"api_name": "app.models", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 30, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 30, "usage_type": "name"}, {"api_name": 
"app.db.session.commit", "line_number": 31, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 31, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 31, "usage_type": "name"}, {"api_name": "utils.cors_response", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 32, "usage_type": "call"}, {"api_name": "app.app.route", "line_number": 11, "usage_type": "call"}, {"api_name": "app.app", "line_number": 11, "usage_type": "name"}]} +{"seq_id": "372880720", "text": "# Noah Hefner\n# Menu Manager for Pygame\n# Last Edit: 2.15.2019\n\nimport pygame\nfrom MenuManager import MenuManager\nimport Constants\nfrom MenuPage import MenuPage\nfrom TextButton import TextButton\n\nBLACK = [0,0,0]\n\npygame.init()\n\n# Set the width and height of the screen [width, height]\nsize = (Constants.SCREEN_WIDTH, Constants.SCREEN_HEIGHT)\nscreen = pygame.display.set_mode(size)\n\npygame.display.set_caption(\"Menu Manager Demo\")\n\ndone = False\n\nclock = pygame.time.Clock()\n\npage_home = MenuPage(\"HOME\")\n\nbutton_play = TextButton(\"PLAY\", \"GAME\")\nbutton_upgrades = TextButton(\"UPGRADES\", \"UPGRADES\")\nbutton_highscores = TextButton(\"HIGHSCORES\", \"HIGHSCORES\")\nbutton_quit = TextButton(\"QUIT\", \"QUIT\")\n\npage_home.add_text_button(button_play)\npage_home.add_text_button(button_upgrades)\npage_home.add_text_button(button_highscores)\npage_home.add_text_button(button_quit)\n\npage_upgrades = MenuPage(\"UPGRADES\")\n\nbutton_speed = TextButton(\"+SPEED (-10 Coins)\", None)\nbutton_ammo = TextButton(\"+AMMO (-20 Coins)\", None)\nbutton_lives = TextButton(\"+LIFE (-30 Coins)\", None)\nbutton_back = TextButton(\"BACK\", \"HOME\")\n\npage_upgrades.add_text_button(button_speed)\npage_upgrades.add_text_button(button_ammo)\npage_upgrades.add_text_button(button_lives)\npage_upgrades.add_text_button(button_back)\n\npage_highscores = MenuPage(\"HIGHSCORES\")\nbutton_back = TextButton(\"BACK\", \"HOME\")\n# Read highscores from text file and use a loop to create ten buttons with Args\n# name as the persons name and None as the to_page. 
Use another loop to add\n# the buttons to the page.\npage_highscores.add_text_button(button_back)\n\n# page_home is auto-added when we create the sf_menus object\n# One menu page is required to create a manager\nsf_menus = MenuManager(page_home)\nsf_menus.add_menu_page(page_upgrades)\nsf_menus.add_menu_page(page_highscores)\n\npage_home.set_x_positioning(\"mid\")\npage_home.set_y_positioning(\"mid\")\n\npage_upgrades.set_x_positioning(\"mid\")\npage_upgrades.set_y_positioning(\"mid\")\n\n# The state variable determines what portion of the program should be displayed.\n# Your games update method should return one of these strings and save it to the\n# state variable.\n# \"GAME\": For when the game is not over yet\n# \"MENU\": For when you want to return to the main menu\n# \"HIGHSCORE_SAVER\": For when you want to save the score of the game as a\n# highscore\n# \"QUIT\": For when you want to end the whole program\nstate = \"MENU\" # Set state to menu to load the menu unpon startup\n\nwhile not done:\n\n for event in pygame.event.get():\n\n state = sf_menus.update(event)\n\n if event.type == pygame.QUIT:\n\n done = True\n\n screen.fill(BLACK)\n \n if state == \"MENU\":\n # Notice how we pass event to the menumanager update method\n state = sf_menus.update(event)\n\n sf_menus.display(screen)\n\n elif state == \"GAME\":\n # Your game methods go here.\n pass\n\n elif state == \"QUIT\":\n\n done = True\n\n elif state == \"HIGHSCORE_SAVER\":\n\n pass\n\n pygame.display.flip()\n\n clock.tick(60)\n\npygame.quit()\n", "sub_path": "Example_Demo.py", "file_name": "Example_Demo.py", "file_ext": "py", "file_size_in_byte": 3039, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pygame.init", "line_number": 13, "usage_type": "call"}, {"api_name": "Constants.SCREEN_WIDTH", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Constants.SCREEN_HEIGHT", "line_number": 16, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 23, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 23, "usage_type": "attribute"}, {"api_name": "MenuPage.MenuPage", "line_number": 25, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 27, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 28, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 29, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 30, "usage_type": "call"}, {"api_name": "MenuPage.MenuPage", "line_number": 37, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 39, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 40, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 41, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 42, "usage_type": "call"}, {"api_name": "MenuPage.MenuPage", "line_number": 49, "usage_type": "call"}, {"api_name": "TextButton.TextButton", "line_number": 50, "usage_type": "call"}, {"api_name": "MenuManager.MenuManager", "line_number": 58, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 80, 
"usage_type": "call"}, {"api_name": "pygame.event", "line_number": 80, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 84, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.quit", "line_number": 112, "usage_type": "call"}]} +{"seq_id": "539431903", "text": "# See https://github.com/projectmesa/mesa/blob/main/examples/boid_flockers/boid_flockers/server.py\n# This is being used as a starting point for our project\nfrom mesa.visualization.ModularVisualization import ModularServer\n\nfrom .boid import Boid\nfrom .scout import Scout\nfrom .goal import Goal\nfrom .model import BoidFlockers\nfrom .SimpleContinuousModule import SimpleCanvas\n\n\ndef boid_draw(agent):\n portrayal = None\n if isinstance(agent, Boid):\n portrayal = {\n 'Shape': 'circle',\n 'r': 2,\n 'Filled': 'true',\n 'Color': 'Red'\n }\n elif isinstance(agent, Scout):\n portrayal = {\n 'Shape': 'circle',\n 'r': 4,\n 'Filled': 'true',\n 'Color': 'Blue'\n }\n elif isinstance(agent, Goal):\n portrayal = {\n 'Shape': 'cirlcle',\n 'r': 6,\n 'Filled': 'true',\n 'Color': 'Red'\n }\n\n return portrayal\n\n\nboid_canvas = SimpleCanvas(boid_draw, 400, 1000)\n# just use defaults\n# model_params = {\n# \"population\": 100,\n# \"width\": 100,\n# \"height\": 100,\n# \"speed\": 5,\n# \"vision\": 10,\n# \"separation\": 2,\n# }\n\nserver = ModularServer(BoidFlockers, [boid_canvas], \"Boids\")", "sub_path": "src/server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 1247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "boid.Boid", "line_number": 14, "usage_type": "argument"}, {"api_name": "scout.Scout", "line_number": 21, "usage_type": "argument"}, {"api_name": "goal.Goal", "line_number": 28, "usage_type": "argument"}, {"api_name": "SimpleContinuousModule.SimpleCanvas", "line_number": 39, "usage_type": "call"}, {"api_name": "mesa.visualization.ModularVisualization.ModularServer", "line_number": 50, "usage_type": "call"}, {"api_name": "model.BoidFlockers", "line_number": 50, "usage_type": "argument"}]} +{"seq_id": "409198542", "text": "import numpy as np\r\nfrom sklearn.cluster import MeanShift\r\nfrom sklearn.datasets.samples_generator import make_blobs\r\nimport matplotlib.pyplot as plt\r\n\r\ncenters = [[1,2],[4,4],[2,10]]\r\nX, _ = make_blobs(n_samples = 500, centers = centers, cluster_std = 1)\r\n\r\nplt.scatter(X[:,0],X[:,1])\r\nplt.show()\r\n\r\nms = MeanShift()\r\nms.fit(X)\r\nlabels = ms.labels_\r\ncluster_centers = ms.cluster_centers_\r\nn_clusters_ = len(np.unique(labels))\r\n\r\nprint(\"Numbers of estimated clusters : \",n_clusters_)\r\nprint(\"Center Points are....\\n\",cluster_centers)\r\ncolors = 10 * ['r.', 'm.', 'g.', 'b.', 'k.', 'y.']\r\n\r\nprint(colors)\r\nprint(labels)\r\nfor i in range(len(X)) :\r\n plt.plot(X[i][0],X[i][1], colors[labels[i]], markersize = 10)\r\n\r\nplt.scatter(cluster_centers[:,0],cluster_centers[:,1],\r\n marker=\"x\", s=150, linewidths=5,zorder=10)\r\nplt.show()\r\n\r\n", "sub_path": "ML-Learning/Clustering/hierarchialClustering.py", "file_name": "hierarchialClustering.py", "file_ext": "py", "file_size_in_byte": 840, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sklearn.datasets.samples_generator.make_blobs", "line_number": 7, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot.scatter", "line_number": 9, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 9, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 10, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 10, "usage_type": "name"}, {"api_name": "sklearn.cluster.MeanShift", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}]} +{"seq_id": "151382451", "text": "\"\"\"Module for deploying MetaKB EB environment.\"\"\"\nimport boto3\nimport time\nelasticbeanstalk = boto3.client('elasticbeanstalk')\nservicecatalog = boto3.client('servicecatalog')\nterminate_time = 12\neb_app_name = \"metakb\"\neb_env_name = \"metakb-staging-env\"\nsc_product_id = \"prod-m4b65t5jgmcm4\"\nprint(f'Launching new Service Catalog Product for staging environment: '\n f'{eb_app_name}')\nsc_product_artifacts =\\\n servicecatalog.list_provisioning_artifacts(ProductId=sc_product_id)\nfor artifact in sc_product_artifacts['ProvisioningArtifactDetails']:\n if artifact['Active']:\n provisioning_artifact_id = artifact['Id']\ntry:\n eb_provisioned_product = servicecatalog.provision_product(\n ProductId=sc_product_id,\n ProvisioningArtifactId=provisioning_artifact_id,\n ProvisionedProductName=eb_env_name,\n ProvisioningParameters=[\n {\n 'Key': 'Env',\n 'Value': eb_app_name\n },\n {\n 'Key': 'EnvType',\n 'Value': 'staging'\n },\n {\n 'Key': 'TerminateTime',\n 'Value': str(terminate_time)\n }\n ])\n eb_provisioned_product_Id = \\\n eb_provisioned_product['RecordDetail']['ProvisionedProductId']\n product_status = servicecatalog.describe_provisioned_product(\n Id=eb_provisioned_product_Id)\n eb_provisioned_product_status =\\\n product_status['ProvisionedProductDetail']['Status']\n while eb_provisioned_product_status == \"UNDER_CHANGE\":\n time.sleep(10)\n product_status = servicecatalog.describe_provisioned_product(\n Id=eb_provisioned_product_Id)\n eb_provisioned_product_status = \\\n product_status['ProvisionedProductDetail']['Status']\n print(eb_provisioned_product_status)\nexcept: # noqa: E722\n print(\"The EB environment is already running...\")\n", "sub_path": "codebuild/deploy_eb_env.py", "file_name": "deploy_eb_env.py", "file_ext": "py", "file_size_in_byte": 1911, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "boto3.client", "line_number": 4, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 5, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "149613676", "text": "import pyowm\nimport time\nimport random\nimport datetime\nimport telepot\nimport wikipedia\nimport json\n\napi = '6d93d0e4608c561b117bcc41a860d938'\nowm = pyowm.OWM(api)\nforecast = owm.daily_forecast(\"Delhi,in\")\ntomorrow = pyowm.timeutils.tomorrow()\nforecast.will_be_sunny_at(tomorrow)\nobservation = owm.weather_at_place('Delhi,in')\nw = observation.get_weather()\ndef handle(msg):\n chat_id = 
msg['chat']['id']\n command = msg['text']\n\n print ('Got command: %s' % command)\n\n if command == '/roll':\n bot.sendMessage(chat_id, random.randint(1,6))\n elif command == '/time':\n bot.sendMessage(chat_id, str(datetime.datetime.now()))\n elif command == '/hi':\n bot.sendMessage(chat_id, \"Hello\")\n elif command == '/weather':\n bot.sendMessage(chat_id,\"New Delhi,India\\n\" + w.get_detailed_status() + \"\\n\\nTemperature Detials:\\n\" + str(w.get_temperature(unit='celsius'))+ \"\\n\\nWind Speed Details:\\n\" + str(w.get_wind())+\"\\n\\nCloud Coverage: \\n\" + str(w.get_clouds())+\"%\"+\"\\n\\nHumidity: \\n\" + str(w.get_humidity())+\"%\"+\"\\n\\nPressure Details :\\n\" + str(w.get_pressure())+\"\\n\\nData fetched by openweathermap API.All copyrights reserved\")\n elif '/wiki' in command :\n ny = wikipedia.summary(command[5:len(str(command))],sentences = 7)\n bot.sendMessage(chat_id,ny)\n elif command == '/help' :\n bot.sendMessage(chat_id,\"List of supported commands is \\n/hi - Greet Your Device\\n/roll - Rolls a dice\\n/weather - Tells detailed current weather report of Raspberry Pi's location\\n/time - Tells current date and time\\n/wiki - Does a topic search on wikipedia and gives a summary of the topic.Try /wiki \\n\\nSee your autofill for quick selection of command or tap '/' icon on right side of your chat textbox\")\n else :\n bot.sendMessage(chat_id,\"Type /help for list of supported commands till now,There are many more to come!!\")\nbot = telepot.Bot('228758380:AAELxfbRv-nlJXCV_L9hP6zEHwAFf1bm040')\nbot.message_loop(handle)\nprint ('I am listening ...')\n\nwhile 1:\n time.sleep(10)\n", "sub_path": "bot.py", "file_name": "bot.py", "file_ext": "py", "file_size_in_byte": 2049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pyowm.OWM", "line_number": 10, "usage_type": "call"}, {"api_name": "pyowm.timeutils.tomorrow", "line_number": 12, "usage_type": "call"}, {"api_name": "pyowm.timeutils", "line_number": 12, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wikipedia.summary", "line_number": 31, "usage_type": "call"}, {"api_name": "telepot.Bot", "line_number": 37, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 42, "usage_type": "call"}]} +{"seq_id": "514270937", "text": "from selenium import webdriver\nimport requests\nimport os; import re\nimport time\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\ndef get_tags(driver):\n \n titlelist=[]\n chlist=[]\n num=0\n while True: \n titlepath='/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[7]/div[2]/ytd-video-primary-info-renderer/div/h1/yt-formatted-string' \n title=driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[7]/div[2]/ytd-video-primary-info-renderer/div/h1/yt-formatted-string').text\n ch=driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/div[2]/ytd-video-owner-renderer/div[1]/ytd-channel-name/div/div/yt-formatted-string/a').text\n \n titlelist.append(title); chlist.append(ch)\n time.sleep(2) \n \n 
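The bot.py handler above grows one `elif` per command. A dispatch-table refactor sketch (not the original bot's structure) keeps each command self-contained:

```python
import datetime
import random

COMMANDS = {
    "/roll": lambda: str(random.randint(1, 6)),
    "/time": lambda: str(datetime.datetime.now()),
    "/hi": lambda: "Hello",
}

def reply_for(command):
    handler = COMMANDS.get(command)
    if handler is None:
        return "Type /help for the list of supported commands."
    return handler()

assert reply_for("/hi") == "Hello"
```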
#따봉\n #driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[7]/div[2]/ytd-video-primary-info-renderer/div/div/div[3]/div/ytd-menu-renderer/div/ytd-toggle-button-renderer[1]/a/yt-icon-button/button/yt-icon').click()\n \n ##category check\n # try:\n # driver.find_element_by_xpath('/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/paper-button[2]/yt-formatted-string')\\\n # .click()\n # except:\n # print('no 더보기 1')\n \n # try:\n # catpath=r\"/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/ytd-metadata-row-container-renderer/div[2]/ytd-metadata-row-renderer/div/yt-formatted-string/a\"\n # category=driver.find_element_by_xpath(catpath).text\n # except:\n # catpath2=\"/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/ytd-metadata-row-container-renderer/div[1]/ytd-metadata-row-renderer/div/yt-formatted-string/a\"\n # category=driver.find_element_by_xpath(catpath2).text\n ##########\n \n #다음 추천동영상 클릭\n #if up_category=='뉴스/정치':\n waypoint=driver.current_url\n waypoint=str(waypoint)\n \n #if category=='뉴스/정치':\n # driver.find_element_by_xpath(\"/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[12]/ytd-watch-next-secondary-results-renderer/div[2]/ytd-compact-autoplay-renderer/div[2]/ytd-compact-video-renderer/div[1]/div/div[1]/a/h3/span\").click()\n # time.sleep(4)\n \n indexer=1\n \n print(waypoint)\n ####################\n \n \n while True:\n \n top='/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[12]/ytd-watch-next-secondary-results-renderer/div[2]/ytd-compact-video-renderer['\n bottom=']/div[1]/div/div[1]/a/h3/span'\n \n driver.find_element_by_xpath(top+str(indexer)+bottom).click()\n indexer+=1\n ##category check\n \n \n ###로드될때까지 기다리기, 기준은 제목 로드\n WebDriverWait(driver, 100).until(\n EC.presence_of_element_located((By.XPATH, titlepath))\n )\n time.sleep(1)\n ###########\n try:\n duhbogi='/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/paper-button[2]/yt-formatted-string'\n driver.find_element_by_xpath(duhbogi).click()\n break\n except:\n pass\n \n try:\n catpath=r\"/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/ytd-metadata-row-container-renderer/div[2]/ytd-metadata-row-renderer/div/yt-formatted-string/a\"\n up_category=driver.find_element_by_xpath(catpath).text\n except:\n catpath2=\"/html/body/ytd-app/div/ytd-page-manager/ytd-watch-flexy/div[4]/div[1]/div/div[9]/div[3]/ytd-video-secondary-info-renderer/div/ytd-expander/ytd-metadata-row-container-renderer/div[1]/ytd-metadata-row-renderer/div/yt-formatted-string/a\"\n up_category=driver.find_element_by_xpath(catpath2).text\n ##########\n print(up_category)\n if ((up_category=='뉴스/정치')or(up_category=='인물/블로그')):\n previous_url = waypoint\n break\n else:\n \n #driver.back()\n driver.get(previous_url)\n \n time.sleep(3)\n \n ####################33\n print('while out')\n \n ####텍스트로 100마다 저장\n if num%100==0:\n f = open(r\"C:\\Users\\Z\\Desktop\\학교\\youtubemacro\\진보03_trial\"+(str(num))+\".txt\",'w', encoding='UTF8')\n f.write(str(titlelist))\n f.write('\\n');f.write('\\n');f.write('\\n')\n 
f.write(str(chlist))\n \n f.close()\n ####\n #팝업탭 닫기\n driver.window_handles\n while len(driver.window_handles)!=1:\n \n last_tab = driver.window_handles[-1]\n driver.switch_to.window(window_name=last_tab)\n \n driver.close() \n \n ###\n num=num+1\n \n return titlelist,chlist\n \n", "sub_path": "code/ongoing!!!/macro_get_title_n_ch.py", "file_name": "macro_get_title_n_ch.py", "file_ext": "py", "file_size_in_byte": 5861, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.ui.WebDriverWait", "line_number": 66, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions.presence_of_element_located", "line_number": 67, "usage_type": "call"}, {"api_name": "selenium.webdriver.support.expected_conditions", "line_number": 67, "usage_type": "name"}, {"api_name": "selenium.webdriver.common.by.By.XPATH", "line_number": 67, "usage_type": "attribute"}, {"api_name": "selenium.webdriver.common.by.By", "line_number": 67, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 94, "usage_type": "call"}]} +{"seq_id": "433654584", "text": "import torch.nn as nn\nimport torchvision.models as models\n\nfrom network.pooling import WildcatPool2d, ClassWisePool\n\n\nclass ResNetWSL(nn.Module):\n def __init__(self, model, num_classes, pooling=WildcatPool2d(), dense=False):\n super(ResNetWSL, self).__init__()\n\n self.dense = dense\n\n self.features = nn.Sequential(\n model.conv1,\n model.bn1,\n model.relu,\n model.maxpool,\n model.layer1,\n model.layer2,\n model.layer3,\n model.layer4)\n\n # classification layer\n num_features = model.layer4[1].conv1.in_channels\n self.classifier = nn.Sequential(\n nn.Conv2d(num_features, num_classes, kernel_size=1, stride=1, padding=0, bias=True), nn.Sigmoid())\n\n self.spatial_pooling = pooling\n\n # image normalization\n self.image_normalization_mean = [0.485, 0.456, 0.406]\n self.image_normalization_std = [0.229, 0.224, 0.225]\n\n def forward(self, x):\n x = self.features(x)\n x = self.classifier(x)\n if not self.dense:\n x = self.spatial_pooling(x)\n return x\n\n def get_config_optim(self, lr, lrp):\n return [{'params': self.features.parameters(), 'lr': lr * lrp},\n {'params': self.classifier.parameters()},\n {'params': self.spatial_pooling.parameters()}]\n\n\nclass wildcat(nn.Module):\n def __init__(self, num_classes, pretrained=True, kmax=1, kmin=None, alpha=1, num_maps=1):\n super(wildcat, self).__init__()\n self.num_classes = num_classes\n self.pretrained = pretrained\n\n self.model = models.resnet50(self.pretrained)\n self.pooling = nn.Sequential()\n self.pooling.add_module('class_wise', ClassWisePool(num_maps))\n self.pooling.add_module('spatial', WildcatPool2d(kmax, kmin, alpha))\n self.wsl = ResNetWSL(self.model, self.num_classes * num_maps, pooling=self.pooling)\n\n def forward(self, x):\n x = self.wsl.forward(x)\n return x\n", "sub_path": "network/wildnet.py", "file_name": "wildnet.py", "file_ext": "py", "file_size_in_byte": 2030, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.nn.Module", "line_number": 7, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 7, "usage_type": "name"}, {"api_name": "network.pooling.WildcatPool2d", "line_number": 8, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", 
"line_number": 13, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 13, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 25, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 25, "usage_type": "name"}, {"api_name": "torch.nn.Conv2d", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 26, "usage_type": "name"}, {"api_name": "torch.nn.Sigmoid", "line_number": 26, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 47, "usage_type": "name"}, {"api_name": "torchvision.models.resnet50", "line_number": 53, "usage_type": "call"}, {"api_name": "torchvision.models", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "network.pooling.ClassWisePool", "line_number": 55, "usage_type": "call"}, {"api_name": "network.pooling.WildcatPool2d", "line_number": 56, "usage_type": "call"}]} +{"seq_id": "518191877", "text": "from urllib.request import urlopen\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport time\nimport os\n\n\n# 사용자가 크롤링하려는 해시태그 이름으로 URL을 지정하고 해시태그 이름에 '#'를 추가해 줍니다.\ndef insta_searching(tag):\n url = 'https://www.instagram.com/explore/tags/' + tag\n tag = '#' + tag\n return tag, url\n\n\n# 해시태그로 검색된 URL로 이동하면 여러 게시물들이 나오는데 첫번째 게시물로 이동합니다.\ndef select_first(driver):\n first = driver.find_element_by_css_selector(\"div._9AhH0\")\n first.click()\n time.sleep(3)\n\n\ndef get_content(driver, idx1):\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n # 게시물의 태그 크롤링\n try:\n # 게시물 + 댓글 작성자 이름 리스트\n name_list = []\n # 게시물 + 댓글 내 태그 리스트\n tag_list = []\n # 게시물 + 댓굴 접근 div 리스트\n divs = soup.find_all(\"div\", {'class': 'C4VMK'})\n # print(\"게시물 + 댓글 갯수 : {}개\".format(len(divs)))\n if len(divs) > 0:\n # 게시물 + 댓글 내 작성자 이름 저장하기\n for idx in range(len(divs)):\n # 첫번쨰 인덱스는 게시물\n if idx == 0:\n name_list.append(divs[idx].select_one('h2 > div > span > a').text)\n print(\"게시물 작성자 아이디 : {}\".format(name_list[0]))\n # 나머지 인덱스는 모두 댓글\n else:\n name_list.append(divs[idx].select_one('h3 > div > span > a').text)\n # print(\"{}번째 댓글 작성자 아이디 : {}\".format(idx, name_list[idx]))\n # print(\"이름 갯수 : {}, 이름 리스트 : {}\".format(len(name_list), name_list))\n else:\n print(\"게시물 + 댓글 리스트 접근 실패!\")\n return None\n # 게시물 + 댓글 내 태그 저장하기\n for idx1 in range(len(name_list)):\n # 작성자 이름 리스트를 토대로 게시물 작성자 이름과 일치하는 아이디를 찾는다\n if name_list[0] == name_list[idx1]:\n tags = divs[idx1].find_all(\"a\", {\"class\": \"xil3i\"})\n # 게시물 + 댓글 내 태그가 있는경우\n if len(tags) > 0:\n for idx2 in range(len(tags)):\n tag_list.append(tags[idx2].text)\n # 검색 태그가 존재하는경우 반복문 나감\n if \"#\" + word in tag_list:\n break\n # 게시물 + 댓글 내 태그가 없는경우\n else:\n # 게시물 작성자가 자신의 댓글 내 답글에 태그를 작성할 경우 답글을 찾아보기 위해 답글 보기 버튼을 눌러야한다.\n if idx1 > 0 and has_css_selector(\"ul > ul:nth-child(\"+str(idx1+1)+\") > li > ul > li > div > button\"):\n # 답글 보기 버튼 접근을 하였고 버튼 내 텍스트에 접근하여 '답글 보기'와 '답글 숨기기'를 구별함\n reply_btn = soup.find(\"span\", {\"class\": \"EizgU\"}).text\n # '답글 보기' 되어있을 경우 해당 버튼을 눌러줍니다.\n if reply_btn != \"답글 숨기기\":\n # 해당 댓글의 '답글 보기' 버튼을 눌러줍니다.\n driver.find_element_by_css_selector(\"ul > ul:nth-child(\" + str(idx1 + 1) + \") > li > ul > li > div > button.sqdOP.yWX7d.y3zKF\").click()\n time.sleep(2)\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n # 웹페이지를 새로고침했기때문에 상위 경로부터 다시 
지정해줌\n new_divs = soup.find_all(\"div\", {\"class\": \"C4VMK\"})\n num_of_reply = len(new_divs) - len(divs)\n # print(\"답글의 갯수 : {}개\".format(num_of_reply))\n # 답글에서 답글 작성자와 태그를 찾습니다.\n for idx3 in range(1, num_of_reply + 1):\n reply_name = new_divs[idx1 + idx3].select_one('h3 > div > span > a').text\n if name_list[0] == reply_name:\n reply_tags = new_divs[idx1 + idx3].find_all(\"a\", {\"class\": \"xil3i\"})\n if len(reply_tags) > 0:\n for idx4 in range(len(reply_tags)):\n tag_list.append(reply_tags[idx4].text)\n if \"#\" + word in tag_list:\n break\n if len(tag_list) == 0:\n print(\"게시물 테그를 불러오지 못했습니다.\")\n return None\n else:\n print(\"태그 갯수 : {}개, 태그 : {}\".format(len(tag_list), tag_list))\n\n except:\n # 게시물의 태그 크롤링 실패\n print(\"게시물 테그 크롤링 NULL\")\n return None\n\n # 게시물의 사진 크롤링\n try:\n # 게시물에 사진이 한장만 있는 경우\n if has_css_selector('div._97aPb > div > div > div.KL4Bh'):\n imgPath = 'div.zZYga > div > article > div._97aPb > div > div > div.KL4Bh > img'\n get_image(soup, imgPath, idx1)\n\n # 게시물에 사진이 한장만 있고 인물 태그가 붙어있는 경우\n elif has_css_selector('div._97aPb > div > div > div.eLAPa._23QFA'):\n imgPath = 'div.zZYga > div > article > div._97aPb > div > div > div.eLAPa._23QFA > div.KL4Bh > img'\n get_image(soup, imgPath, idx1)\n\n # 게시물에 사진이 여러장 있는 경우\n elif has_css_selector('div._97aPb > div > div.pR7Pc > div.Igw0E.IwRSH.eGOV_._4EzTm.O1flK.D8xaz.fm1AK.TxciK.yiMZG > div > div > div > ul'):\n get_image_list(soup, idx1)\n\n # 게시물에 비디오 영상이 한개 있는 경우\n else:\n print(\"해당 게시물은 영상(비디오)입니다.\")\n return None\n except:\n # 게시물의 이미지 크롤링 실패\n print(\"게시물 이미지 정보 NULL\")\n return None\n\n # 게시물 업로드 날짜 크롤링\n try:\n date = soup.select('time._1o9PC.Nzb55')[0]['datetime'][:10]\n # print(\"게시물 업로드 날짜 : {}\".format(date))\n except:\n # 게시물 업로드 날짜 크롤링 실패\n date = 'NULL'\n print(\"날짜 크롤링 NULL\")\n\n # 게시물의 좋아요 수 크롤링\n try:\n # 좋아요 수\n if has_css_selector('div.Nm9Fw > a'):\n if has_css_selector('div.Nm9Fw > a > span'):\n like = soup.select_one('div.Nm9Fw > a > span').text\n like = int(like.replace(',', ''))\n # print('좋아요 : {}개'.format(like))\n else:\n like = 1\n # print('좋아요 : {}개'.format(like))\n # 좋아요 수 정보가 없는 경우\n elif has_css_selector('div.Nm9Fw > button'):\n like = 0\n # print('좋아요 정보가 없습니다.')\n # 해당 게시물이 비디오일 경우 조회수\n elif has_css_selector('div.HbPOm._9Ytll > span'):\n like = 'NULL'\n print('좋아요가 아닌 조회수 정보가 있습니다.')\n except:\n # 게시물의 좋아요 수 크롤링 실패\n like = 'NULL'\n print(\"좋아요 크롤링 NULL\")\n\n # 사용자의 장소 크롤링\n try:\n # 게시물에 장소 정보가 있는 경우\n if has_css_selector('div.JF9hh > a'):\n place = soup.select_one('div.JF9hh > a').text\n # print('장소 : {}'.format(place))\n # 게시물에 장소 정보가 없는 경우\n else:\n place = ''\n # print('장소 정보가 비워져있습니다')\n except:\n # 사용자의 장소 크롤링 실패\n place = 'NULL'\n print(\"장소 크롤링 NULL\")\n\n # 크롤링한 정보 반환\n data = [date, like, place, tag_list]\n return data\n\n\n# css에 해당 selector 존재하는지 확인하는 함수입니다.\ndef has_css_selector(select):\n try:\n driver.find_element_by_css_selector(select)\n return True\n except:\n return False\n\n\n# 게시물 내 사진이 한장만 있을 경우 해당 사진을 저장하는 함수입니다.\ndef get_image(soup, imgPath, idx):\n img = soup.select_one(imgPath)[\"src\"]\n with urlopen(img) as f:\n createFolder('./{}'.format(word))\n with open('./{}/'.format(word) + word + str(idx + 1) + '.jpg', 'wb') as h:\n imgFile = f.read()\n h.write(imgFile)\n print(\"이미지 저장 : {}\".format(img))\n\n\n# 게시물 내 사진이 여러장 있을 경우 해당 사진들을 저장하는 함수입니다.\ndef get_image_list(soup, idx1):\n file = None\n file_type = None\n # 게시물 내 사진 및 비디오의 갯수를 저장합니다.\n num = len(soup.select('div.JSZAJ._3eoV-.IjCL9.WXPwG > div.Yi5aA'))\n for idx2 in range(num):\n ul = 
soup.find(\"ul\", {\"class\": \"vi798\"})\n lis = ul.find_all(\"li\", {\"class\": \"Ckrof\"})\n # 인스타그램 게시물은 사진을 넘길때마다 사진의 인덱스 순서가 변경되며\n # 삽입, 삭제 과정을 지닌 연결리스트 구조를 띄고 있기 때문에 사진 리스트를 매번 불러와야합니다.\n for idx3 in range(len(lis)):\n # 해당 인덱스에 사진이 있는 경우입니다.\n if lis[idx3].find(\"img\", {\"class\": \"FFVAD\"}) is not None and idx3 < len(lis)-1:\n file_type = \"img\"\n file, file_type = compare_to_the_following_index(file, file_type, lis, idx3)\n if file_type == \"img\":\n break\n # 해당 인덱스에 비디오가 있는 경우입니다.\n elif lis[idx3].find(\"video\", {\"class\": \"tWeCl\"}) is not None and idx3 < len(lis)-1:\n file_type = \"video\"\n file, file_type = compare_to_the_following_index(file, file_type, lis, idx3)\n if file_type == \"img\":\n break\n # 현재 인덱스에 사진 정보가 있는 경우 해당 사진을 저장합니다.\n if file is not None and file_type == \"img\":\n with urlopen(file) as f:\n createFolder('./{}'.format(word))\n with open('./{}/'.format(word) + word + str(idx1 + 1) + '-' + str(idx2 + 1) + '.jpg', 'wb') as h:\n imgFile = f.read()\n h.write(imgFile)\n print(\"{}번째 이미지 저장 : {}\".format(idx2+1, file))\n # 다음 사진으로 넘어가는 버튼이 있을 경우 다음 사진으로 이동합니다.\n if has_css_selector('button._6CZji') and idx2 < num-1:\n move_next(driver, \"next_img\")\n html = driver.page_source\n soup = BeautifulSoup(html, 'html.parser')\n print(\"{} -> {}\".format(idx2+1, idx2+2))\n\n\n# 현재 인덱스의 파일과 다음 인덱스의 파일을 비교하는 함수입니다.\ndef compare_to_the_following_index(file, file_type, lis, idx):\n # 현재 인덱스가 사진인 경우 사진 정보를 저장하고 비디오인 경우 비디오 정보를 저장합니다\n if file_type == \"img\":\n file_src = lis[idx].find(\"img\", {\"class\": \"FFVAD\"})[\"src\"]\n # print(\"현재 인덱스의 사진 리스트(이미지) : {}\".format(file_src))\n else:\n file_src = lis[idx].find(\"video\", {\"class\": \"tWeCl\"})[\"src\"]\n # print(\"현재 인덱스의 사진 리스트(비디오) : {}\".format(file_src))\n # 파일의 정보가 없는 초기 파일 정보를 저장합니다.\n if file is None:\n file = file_src\n # print(\"초기 사진 저장\")\n else:\n if file == file_src:\n # 다음 인덱스가 사진인 경우 사진 정보를 저장하고 비디오인 경우 비디오 정보를 저장합니다\n if lis[idx + 1].find(\"img\", {\"class\": \"FFVAD\"}) is not None:\n file = lis[idx + 1].find(\"img\", {\"class\": \"FFVAD\"})[\"src\"]\n # print(\"다음 인덱스의 사진 리스트(이미지) : {}\".format(file))\n file_type = \"img\"\n\n elif lis[idx + 1].find(\"video\", {\"class\": \"tWeCl\"}) is not None:\n file = lis[idx + 1].find(\"video\", {\"class\": \"tWeCl\"})[\"src\"]\n # print(\"다음 인덱스의 사진 리스트(비디오) : {}\".format(file))\n file_type = \"video\"\n return file, file_type\n\n\n# 주어진 형식에 따라 페이지를 이동하는 함수입니다.\ndef move_next(driver, type):\n # type이 \"next_img\"인 경우 다음 사진으로 이동합니다.\n if type == \"next_img\":\n driver.find_element_by_css_selector('button._6CZji').click()\n # type이 \"next_post\"인 경우 다음 게시물로 이동합니다.\n elif type == \"next_post\":\n driver.find_element_by_css_selector('a._65Bje.coreSpriteRightPaginationArrow').click()\n time.sleep(3)\n\n\n# 폴더를 추가하는 함수입니다.\ndef createFolder(directory):\n try:\n # 기존의 디렉토리에 폴더가 존재하지 않은 경우 폴더를 생성해 줍니다.\n if not os.path.exists(directory):\n os.makedirs(directory)\n except OSError:\n print('Error: Creating directory. 
' + directory)\n\n\nif __name__ == '__main__':\n    # Chrome Driver 경로를 지정하여 Chrome Driver 프로그램을 사용합니다.\n    driver = webdriver.Chrome(\"chromedriver.exe\")\n    # 인스타그램 사이트로 크롬 연결합니다.\n    address = 'https://www.instagram.com'\n    driver.get(address)\n    time.sleep(3)\n    # 인스타그램 로그인 페이지 섹션을 연결합니다.\n    login_section = '//*[@id=\"loginForm\"]/div'\n    driver.find_element_by_xpath(login_section).click()\n    time.sleep(3)\n    # 주어진 아이디를 로그인 아이디 키값으로 보내줍니다.\n    elem_login = driver.find_element_by_name(\"username\")\n    elem_login.clear()\n    elem_login.send_keys('')# 사용자의 인스타그램 아이디를 추가해 주어야합니다.\n    # 주어진 비밀번호를 로그인 비밀번호 키값으로 보내줍니다.\n    elem_login = driver.find_element_by_name('password')\n    elem_login.clear()\n    elem_login.send_keys('')# 사용자의 인스타그램 비밀번호를 추가해 주어야합니다.\n    time.sleep(3)\n    # 로그인 버튼을 눌러줍니다.\n    xpath = '//*[@id=\"loginForm\"]/div/div[3]'\n    driver.find_element_by_xpath(xpath).click()\n    time.sleep(3)\n    # 사용자 계정 정보를 저장하는 알림창을 무시해줍니다.\n    xpath1 = '//*[@id=\"react-root\"]/section/main/div/div/div/div'\n    driver.find_element_by_xpath(xpath1).click()\n    time.sleep(3)\n    # insta_searching 함수로 파생된 URL로 이동합니다.\n    word = '' # 크롤링할 해시태그 이름을 word변수에 지정해 주어야합니다.\n    word, url = insta_searching(word)\n    driver.get(url)\n    time.sleep(3)\n    # select_first 함수로 해시태그 검색된 게시물 리스트 중 첫번째 게시물로 이동합니다.\n    select_first(driver)\n    # dates 리스트는 게시물의 날짜를 저장하는 리스트 입니다.\n    dates = []\n    # likes 리스트는 게시물의 좋아요를 저장하는 리스트 입니다.\n    likes = []\n    # places 리스트는 게시물의 장소를 저장하는 리스트 입니다.\n    places = []\n    # tags 리스트는 게시물의 태그들을 저장하는 리스트 입니다.\n    tags = []\n    # target 변수는 크롤링 횟수를 지정하는 변수입니다.\n    target = 50\n    # 크롤링 횟수만큼 반복하여 크롤링합니다.\n    for idx1 in range(target):\n        # idx2 변수는 크롤링 실패할 경우 크롤링 횟수에 포함시키지 않기 위해 크롤링 실패 횟수 변수입니다.\n        idx2 = 0\n        while True:\n            # get_content 함수는 게시물 내 정보를 크롤링하는 함수입니다.\n            data = get_content(driver, idx1)\n            # 크롤링이 성공한 경우입니다.\n            if data is not None:\n                print('{}-{}번째 크롤링 성공!\\n'.format(idx1 + 1, idx2 + 1))\n                # 크롤링한 게시물 내 정보를 dates, likes, places, tags 리스트에 추가해줍니다.\n                dates.append(data[0])\n                likes.append(data[1])\n                places.append(data[2])\n                tags.append(data[3])\n                break\n            # 크롤링이 실패한 경우입니다.\n            else:\n                print('{}-{}번째 크롤링 실패!\\n'.format(idx1 + 1, idx2 + 1))\n                # move_next 함수는 다음 게시물로 이동합니다.\n                move_next(driver, \"next_post\")\n                idx2 = idx2 + 1\n        # move_next 함수는 다음 게시물로 이동합니다.\n        move_next(driver, \"next_post\")\n    # 크롤링 횟수의 크기만큼 저장된 리스트들을 딕셔너리형식으로 저장합니다.\n    data = {\n        'date': dates,\n        'like': likes,\n        'place': places,\n        'tags': tags,\n    }\n    # 딕셔너리형 변수를 csv파일로 변형하여 저장합니다.\n    frame = pd.DataFrame(data)\n    frame.to_csv(\"{}.csv\".format(word), encoding='utf-8-sig')\n", "sub_path": "crawling/crawling.py", "file_name": "crawling.py", "file_ext": "py", "file_size_in_byte": 17714, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.sleep", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 25, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 190, "usage_type": "call"}, {"api_name": "urllib.request.urlopen", "line_number": 224, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 234, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 281, "usage_type": "call"}, {"api_name": "os.path", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 282, "usage_type": "call"}, 
{"api_name": "selenium.webdriver.Chrome", "line_number": 289, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 289, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 293, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 297, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 306, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 310, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 314, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 319, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 364, "usage_type": "call"}]} +{"seq_id": "524523701", "text": "#!/usr/bin/env python\n\nimport rospkg\nfrom jsk_apc2016_common.segmentation_in_bin.bin_data import BinData\nfrom jsk_apc2016_common.rbo_segmentation.apc_data import APCSample\nfrom image_geometry import cameramodels\nimport numpy as np\nimport pickle\nimport cv2\n\nrospack = rospkg.RosPack()\npack_path = rospack.get_path('jsk_apc2016_common')\n\n\nclass RBOSegmentationInBin(object):\n \"\"\"\n This class bridges data and RBO's segmentation in bin algorithm.\n \"\"\"\n def __init__(self, *args, **kwargs):\n self.shelf = {}\n self.mask_img = None\n self.dist_img = None\n self._target_bin = None\n self.camera_model = cameramodels.PinholeCameraModel()\n if 'trained_pkl_path' in kwargs:\n self.load_trained(kwargs['trained_pkl_path'])\n if 'target_bin_name' in kwargs:\n self.target_bin_name = kwargs['target_bin_name']\n\n def from_bin_info_array(self, bin_info_arr):\n for bin_info in bin_info_arr.array:\n self.shelf[bin_info.name] = BinData(bin_info=bin_info)\n if self.target_bin_name is not None:\n self.target_bin = self.shelf[self.target_bin_name]\n self.target_object = self.target_bin.target\n\n def load_trained(self, path):\n with open(path, 'rb') as f:\n self.trained_segmenter = pickle.load(f)\n\n @property\n def target_bin_name(self):\n return self._target_bin_name\n\n @target_bin_name.setter\n def target_bin_name(self, target_bin_name):\n if target_bin_name is not None:\n self._target_bin_name = target_bin_name\n if target_bin_name in self.shelf:\n self.target_object = self.shelf[target_bin_name].target\n assert self.target_object is not None\n self.target_bin = self.shelf[target_bin_name]\n\n @property\n def camera_info(self):\n return self._camera_info\n\n @camera_info.setter\n def camera_info(self, camera_info):\n self._camera_info = camera_info\n if camera_info is not None:\n self.camera_model.fromCameraInfo(camera_info)\n\n @property\n def img_color(self):\n return self._img_color\n\n @img_color.setter\n def img_color(self, img_color):\n # following RBO's convention that img is loaded as HSV\n if img_color is not None:\n self._img_color = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)\n else:\n self._img_color = None\n\n def set_apc_sample(self):\n assert self.target_object is not None\n # TODO: work on define_later later\n define_later = np.zeros((\n self.camera_info.height, self.camera_info.width))\n data = {}\n data['objects'] = self.target_bin.objects\n data['dist2shelf_image'] = self.dist_img\n data['depth_image'] = define_later\n data['has3D_image'] = define_later\n data['height3D_image'] = self.height_img\n data['height2D_image'] = define_later\n self.apc_sample = APCSample(\n image_input=self.img_color,\n bin_mask_input=self.mask_img,\n data_input=data,\n labeled=False,\n infer_shelf_mask=False,\n pickle_mask=False)\n\n def segmentation(self):\n zoomed_predicted_segment = 
self.trained_segmenter.predict(\n apc_sample=self.apc_sample,\n desired_object=self.target_object)\n self.predicted_segment = self.apc_sample.unzoom_segment(\n zoomed_predicted_segment)\n\n self.predicted_segment = self.predicted_segment.astype('uint8')\n", "sub_path": "jsk_apc2016_common/python/jsk_apc2016_common/segmentation_in_bin/rbo_segmentation_in_bin.py", "file_name": "rbo_segmentation_in_bin.py", "file_ext": "py", "file_size_in_byte": 3567, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "rospkg.RosPack", "line_number": 11, "usage_type": "call"}, {"api_name": "image_geometry.cameramodels.PinholeCameraModel", "line_number": 24, "usage_type": "call"}, {"api_name": "image_geometry.cameramodels", "line_number": 24, "usage_type": "name"}, {"api_name": "jsk_apc2016_common.segmentation_in_bin.bin_data.BinData", "line_number": 32, "usage_type": "call"}, {"api_name": "pickle.load", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 72, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2HSV", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "jsk_apc2016_common.rbo_segmentation.apc_data.APCSample", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "455913881", "text": "# coding:utf-8\r\nimport requests\r\nfrom lxml import etree\r\n\r\n\r\nclass Tieba(object):\r\n def __init__(self, name):\r\n self.url = 'https://tieba.baidu.com/f?kw={0}&ie=utf-8&pn=0'.format(name)\r\n print(self.url)\r\n self.headers = {\r\n 'User-Agent': 'Mozilla / 5.0(Windows NT 10.0;WOW64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 78.0.3904.108Safari / 537.36'\r\n # 'User-Agent': \"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)\"\r\n }\r\n\r\n def get_data(self, url):\r\n response = requests.get(url, headers=self.headers)\r\n with open('tieba.html', 'wb') as f:\r\n f.write(response.content)\r\n return response.content\r\n\r\n def parse_data(self, data):\r\n # 创建element对象\r\n data = data.decode().replace('', '')\r\n html = etree.HTML(data)\r\n\r\n el_list = html.xpath('//*[@id=\"thread_list\"]/li[contains(@class,\"j_thread\")]/div/div[2]/div[1]/div[1]/a')\r\n print(len(el_list))\r\n\r\n data_list = []\r\n for el in el_list:\r\n temp = {}\r\n temp['title'] = el.xpath('./text()')[0]\r\n temp['link'] = 'https://tieba.baidu.com' + el.xpath('./@href')[0]\r\n data_list.append(temp)\r\n\r\n # 获取下一页url\r\n try:\r\n next_url = 'https:' + html.xpath('//a[contains(text(),\"下一页>\")]/@href')[0]\r\n except:\r\n next_url = None\r\n\r\n return data_list, next_url\r\n\r\n def save_data(self, data_list):\r\n for data in data_list:\r\n print(data)\r\n\r\n def run(self):\r\n # url\r\n # headers\r\n\r\n next_url = self.url\r\n\r\n while True:\r\n # 发送请求,获取响应\r\n data = self.get_data(next_url)\r\n # 从响应中提取数据(数据和翻页用的url)\r\n data_list, next_url = self.parse_data(data)\r\n\r\n self.save_data(data_list)\r\n # 判断是否终结\r\n if next_url == None:\r\n break\r\n\r\n\r\nif __name__ == '__main__':\r\n tieba = Tieba('东方')\r\n tieba.run()", "sub_path": "Python实例/Crawler/2.4.tieba_test.py", "file_name": "2.4.tieba_test.py", "file_ext": "py", "file_size_in_byte": 2102, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 16, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 24, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 
24, "usage_type": "name"}]} +{"seq_id": "91673414", "text": "# Copyright (c) 2020-2023 Antmicro \n#\n# SPDX-License-Identifier: Apache-2.0\n\n\"\"\"\nWrapper for TensorFlow optimizers.\n\"\"\"\n\nfrom typing import List, Tuple, Optional\nfrom kenning.utils.resource_manager import PathOrURI\nimport tensorflow as tf\n\nfrom kenning.core.optimizer import Optimizer\nfrom kenning.core.dataset import Dataset\n\n\nclass TensorFlowOptimizer(Optimizer):\n \"\"\"\n The TensorFlow optimizer.\n \"\"\"\n arguments_structure = {\n 'epochs': {\n 'description': 'Number of epochs for the training',\n 'type': int,\n 'default': 3\n },\n 'batch_size': {\n 'description': 'The size of a batch for the training',\n 'type': int,\n 'default': 32\n },\n 'optimizer': {\n 'description': 'Optimizer used during the training',\n 'type': str,\n 'default': 'adam',\n 'enum': ['adam', 'SGD', 'RMSprop']\n },\n 'disable_from_logits': {\n 'description': 'Determines whether output of the model is normalized', # noqa: E501\n 'type': bool,\n 'default': False\n }\n }\n\n def __init__(\n self,\n dataset: Dataset,\n compiled_model_path: PathOrURI,\n epochs: int = 10,\n batch_size: int = 32,\n optimizer: str = 'adam',\n disable_from_logits: bool = False):\n \"\"\"\n TensorFlowOptimizer framework.\n\n This class adds a functionality for classification models fine-tuning\n using a given dataset and compiler options.\n\n Parameters\n ----------\n dataset : Dataset\n Dataset used to train the model - may be used for quantization or\n fine-tuning.\n compiled_model_path : PathOrURI\n Path or URI where compiled model will be saved.\n epochs : int\n Number of epochs used to fine-tune the model.\n batch_size : int\n The size of a batch used for the fine-tuning.\n optimizer : str\n Optimizer used during the training.\n disable_from_logits : bool\n Determines whether output of the model is normalized.\n \"\"\"\n self.epochs = epochs\n self.batch_size = batch_size\n self.optimizer = optimizer\n self.disable_from_logits = disable_from_logits\n super().__init__(dataset, compiled_model_path)\n\n def prepare_train_validation(self) -> Tuple:\n \"\"\"\n Prepares train and validation datasets of the model\n and splits them into batches.\n\n Returns\n -------\n Tuple :\n Batched train and validation datasets.\n \"\"\"\n Xt, Xv, Yt, Yv = self.dataset.train_test_split_representations()\n\n Xt = self.dataset.prepare_input_samples(Xt)\n Yt = self.dataset.prepare_output_samples(Yt)\n traindataset = tf.data.Dataset.from_tensor_slices((Xt, Yt))\n traindataset = traindataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n Xv = self.dataset.prepare_input_samples(Xv)\n Yv = self.dataset.prepare_output_samples(Yv)\n validdataset = tf.data.Dataset.from_tensor_slices((Xv, Yv))\n validdataset = validdataset.batch(\n self.batch_size,\n num_parallel_calls=tf.data.experimental.AUTOTUNE\n )\n\n return traindataset, validdataset\n\n def train_model(self, model, callbacks: Optional[List] = None):\n \"\"\"\n Compiles and trains the given model.\n\n The function can be used to retrain the model if needed.\n\n Parameters\n ----------\n model : tf.keras.Model\n The keras model to retrain.\n callbacks : Optional[List]\n List of callback function to use during the training.\n\n Returns\n -------\n tf.keras.Model :\n Trained keras model.\n \"\"\"\n traindataset, validdataset = self.prepare_train_validation()\n\n if len(traindataset.element_spec[1].shape) == 1:\n loss = tf.keras.losses.SparseCategoricalCrossentropy(\n from_logits=not 
self.disable_from_logits\n )\n metrics = [\n tf.keras.metrics.SparseCategoricalAccuracy()\n ]\n else:\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=not self.disable_from_logits\n )\n metrics = [\n tf.keras.metrics.CategoricalAccuracy()\n ]\n\n model.compile(\n optimizer=self.optimizer,\n loss=loss,\n metrics=metrics\n )\n\n model.fit(\n traindataset,\n epochs=self.epochs,\n callbacks=callbacks,\n verbose=1,\n validation_data=validdataset\n )\n\n return model\n\n def get_framework_and_version(self):\n return ('tensorflow', tf.__version__)\n", "sub_path": "kenning/compilers/tensorflow_optimizers.py", "file_name": "tensorflow_optimizers.py", "file_ext": "py", "file_size_in_byte": 4974, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "kenning.core.optimizer.Optimizer", "line_number": 17, "usage_type": "name"}, {"api_name": "kenning.core.dataset.Dataset", "line_number": 47, "usage_type": "name"}, {"api_name": "kenning.utils.resource_manager.PathOrURI", "line_number": 48, "usage_type": "name"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 95, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 95, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 98, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 103, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 103, "usage_type": "attribute"}, {"api_name": "tensorflow.data", "line_number": 106, "usage_type": "attribute"}, {"api_name": "typing.Tuple", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 111, "usage_type": "name"}, {"api_name": "tensorflow.keras.losses.SparseCategoricalCrossentropy", "line_number": 132, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 132, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.SparseCategoricalAccuracy", "line_number": 136, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 136, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.losses.CategoricalCrossentropy", "line_number": 139, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 139, "usage_type": "attribute"}, {"api_name": "tensorflow.keras.metrics.CategoricalAccuracy", "line_number": 143, "usage_type": "call"}, {"api_name": "tensorflow.keras", "line_number": 143, "usage_type": "attribute"}, {"api_name": "tensorflow.__version__", "line_number": 163, "usage_type": "attribute"}]} +{"seq_id": "325226282", "text": "import pathlib\nimport json\nimport numpy as np\n\nimport xobjects as xo\nfrom xobjects.context import available\nimport xline as xl\nimport xtrack as xt\nimport xfields as xf\n\ndef test_collective_tracker():\n\n for CTX in xo.ContextCpu, xo.ContextPyopencl, xo.ContextCupy:\n if CTX not in available:\n continue\n\n print(f\"Test {CTX}\")\n context = CTX()\n\n test_data_folder = pathlib.Path(\n __file__).parent.joinpath('../test_data').absolute()\n path_sequence = test_data_folder.joinpath('sps_w_spacecharge/'\n 'line_with_spacecharge_and_particle.json')\n turn_by_turn_monitor = True\n\n ##################\n # Get a sequence #\n ##################\n\n with open(path_sequence, 'r') as fid:\n input_data = json.load(fid)\n sequence = xl.Line.from_dict(input_data['line'])\n\n # Replace all spacecharge with xobjects\n newseq = 
sequence.copy()\n _buffer = context.new_buffer()\n spch_elements = xf.replace_spaceharge_with_quasi_frozen(\n newseq, _buffer=_buffer)\n\n # For testing I make them frozen but I leave iscollective=True\n for ee in spch_elements:\n ee.update_mean_x_on_track = False\n ee.update_mean_y_on_track = False\n ee.update_sigma_x_on_track = False\n ee.update_sigma_y_on_track = False\n assert ee.iscollective\n\n #################\n # Build Tracker #\n #################\n print('Build tracker...')\n tracker= xt.Tracker(_buffer=_buffer,\n sequence=newseq,\n particles_class=xt.Particles,\n local_particle_src=None,\n save_source_as='source.c')\n\n assert tracker.iscollective\n assert tracker.track == tracker._track_with_collective\n\n ######################\n # Get some particles #\n ######################\n particles = xt.Particles(_context=context, **input_data['particle'])\n\n #########\n # Track #\n #########\n\n print('Track a few turns...')\n n_turns = 10\n tracker.track(particles, num_turns=n_turns,\n turn_by_turn_monitor=True)\n\n assert tracker.record_last_track.x.shape == (1, 10)\n\n #######################\n # Check against xline #\n #######################\n print('Check against xline ...')\n ip_check = 0\n vars_to_check = ['x', 'px', 'y', 'py', 'zeta', 'delta', 's']\n pyst_part = xl.Particles.from_dict(input_data['particle'])\n for _ in range(n_turns):\n sequence.track(pyst_part)\n\n for vv in vars_to_check:\n pyst_value = getattr(pyst_part, vv)\n xt_value = context.nparray_from_context_array(getattr(particles, vv))[ip_check]\n passed = np.isclose(xt_value, pyst_value, rtol=2e-8, atol=7e-9)\n print(f'Check var {vv}:\\n'\n f' pyst: {pyst_value: .7e}\\n'\n f' xtrack: {xt_value: .7e}\\n')\n if not passed:\n raise ValueError\n\n", "sub_path": "tests/test_collective_tracker.py", "file_name": "test_collective_tracker.py", "file_ext": "py", "file_size_in_byte": 3181, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "xobjects.ContextCpu", "line_number": 13, "usage_type": "attribute"}, {"api_name": "xobjects.ContextPyopencl", "line_number": 13, "usage_type": "attribute"}, {"api_name": "xobjects.ContextCupy", "line_number": 13, "usage_type": "attribute"}, {"api_name": "xobjects.context.available", "line_number": 14, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 20, "usage_type": "call"}, {"api_name": "json.load", "line_number": 31, "usage_type": "call"}, {"api_name": "xline.Line.from_dict", "line_number": 32, "usage_type": "call"}, {"api_name": "xline.Line", "line_number": 32, "usage_type": "attribute"}, {"api_name": "xfields.replace_spaceharge_with_quasi_frozen", "line_number": 37, "usage_type": "call"}, {"api_name": "xtrack.Tracker", "line_number": 52, "usage_type": "call"}, {"api_name": "xtrack.Particles", "line_number": 54, "usage_type": "attribute"}, {"api_name": "xtrack.Particles", "line_number": 64, "usage_type": "call"}, {"api_name": "xline.Particles.from_dict", "line_number": 83, "usage_type": "call"}, {"api_name": "xline.Particles", "line_number": 83, "usage_type": "attribute"}, {"api_name": "numpy.isclose", "line_number": 90, "usage_type": "call"}]} +{"seq_id": "517857491", "text": "from PyQt5 import QtWidgets\n\nfrom HeliSimulation import HeliSimulation, ModelType\n\n\nclass ModelFrame(QtWidgets.QFrame):\n\n def __init__(self, sim: HeliSimulation):\n QtWidgets.QFrame.__init__(self)\n\n self.sim = sim\n main_layout = QtWidgets.QVBoxLayout()\n self.setLayout(main_layout)\n\n # Maybe it would be more 
consistent if we read the model type on \"Start\" instead of changing it here\n self.model_1_button = QtWidgets.QRadioButton(\"Simple model\", self)\n self.model_1_button.toggled.connect(self.on_model_toggle)\n self.model_2_button = QtWidgets.QRadioButton(\"+ friction\", self)\n self.model_2_button.toggled.connect(self.on_model_toggle)\n self.model_3_button = QtWidgets.QRadioButton(\"+ centripetal forces\", self)\n self.model_3_button.toggled.connect(self.on_model_toggle)\n self.model_4_button = QtWidgets.QRadioButton(\"+ rotor speed\", self)\n self.model_4_button.toggled.connect(self.on_model_toggle)\n self.model_5_button = QtWidgets.QRadioButton(\"+ gyro moment\", self)\n self.model_5_button.toggled.connect(self.on_model_toggle)\n self.model_5_button.setChecked(True)\n\n self.check_limits_box = QtWidgets.QCheckBox(\"Limit angles\")\n self.check_limits_box.setCheckState(2 if sim.should_check_limits else 0)\n self.check_limits_box.toggled.connect(self.on_limit_check_toggle)\n\n self.check_dynamic_inertia_torque = QtWidgets.QCheckBox(\"Dynamic Inertia Torque\")\n self.check_dynamic_inertia_torque.setCheckState(2 if self.sim.dynamic_inertia_torque else 0)\n self.check_dynamic_inertia_torque.toggled.connect(self.on_dynamic_inertia_torque_toggle)\n\n layout_h = QtWidgets.QHBoxLayout()\n layout_h.addWidget(self.check_limits_box)\n layout_h.addWidget(self.check_dynamic_inertia_torque)\n\n main_layout.addWidget(self.model_1_button)\n main_layout.addWidget(self.model_2_button)\n main_layout.addWidget(self.model_3_button)\n main_layout.addWidget(self.model_4_button)\n main_layout.addWidget(self.model_5_button)\n main_layout.addLayout(layout_h)\n\n def on_model_toggle(self):\n if self.model_1_button.isChecked():\n self.sim.set_model_type(ModelType.EASY)\n elif self.model_2_button.isChecked():\n self.sim.set_model_type(ModelType.FRICTION)\n elif self.model_3_button.isChecked():\n self.sim.set_model_type(ModelType.CENTRIPETAL)\n elif self.model_4_button.isChecked():\n self.sim.set_model_type(ModelType.ROTORSPEED)\n elif self.model_5_button.isChecked():\n self.sim.set_model_type(ModelType.GYROMOMENT)\n\n def on_limit_check_toggle(self):\n self.sim.set_should_limit(self.check_limits_box.checkState() == 2)\n\n def on_dynamic_inertia_torque_toggle(self):\n self.sim.set_dynamic_inertia_torque(self.check_dynamic_inertia_torque.checkState() == 2)\n\n", "sub_path": "gui/ModelFrame.py", "file_name": "ModelFrame.py", "file_ext": "py", "file_size_in_byte": 2948, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 6, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 6, "usage_type": "name"}, {"api_name": "HeliSimulation.HeliSimulation", "line_number": 8, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QFrame.__init__", "line_number": 9, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QFrame", "line_number": 9, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 9, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 12, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 12, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 16, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 16, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 18, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 18, 
"usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 20, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 22, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 22, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QRadioButton", "line_number": 24, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 28, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 28, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QCheckBox", "line_number": 32, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 32, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QHBoxLayout", "line_number": 36, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 36, "usage_type": "name"}, {"api_name": "HeliSimulation.ModelType.EASY", "line_number": 49, "usage_type": "attribute"}, {"api_name": "HeliSimulation.ModelType", "line_number": 49, "usage_type": "name"}, {"api_name": "HeliSimulation.ModelType.FRICTION", "line_number": 51, "usage_type": "attribute"}, {"api_name": "HeliSimulation.ModelType", "line_number": 51, "usage_type": "name"}, {"api_name": "HeliSimulation.ModelType.CENTRIPETAL", "line_number": 53, "usage_type": "attribute"}, {"api_name": "HeliSimulation.ModelType", "line_number": 53, "usage_type": "name"}, {"api_name": "HeliSimulation.ModelType.ROTORSPEED", "line_number": 55, "usage_type": "attribute"}, {"api_name": "HeliSimulation.ModelType", "line_number": 55, "usage_type": "name"}, {"api_name": "HeliSimulation.ModelType.GYROMOMENT", "line_number": 57, "usage_type": "attribute"}, {"api_name": "HeliSimulation.ModelType", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "377246084", "text": "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n\nfrom kubernetes import client, config\nfrom master_info.save_to_db import Save_Master_Data\n\nclass Get_info(object):\n\n def __init__(self,conf,client_func):\n self.config = conf\n self.client_func = client_func\n\n def get_info(self):\n node_status = {}\n ret = self.client_func.list_node()\n for index,func_info in enumerate(ret.items):\n role = func_info.metadata.labels.get(\"node-role.kubernetes.io/master\")\n for func_status in func_info.status.conditions:\n if \"True\" in func_status.status:\n\n if role == 'true':\n node_status.update({index:{'name': func_info.spec.external_id,\n 'status': \"Ready\",\n 'ROLES':'master',\n 'version': func_info.status.node_info.kubelet_version}})\n else:\n node_status.update({index:{'name': func_info.spec.external_id,\n 'status': \"Ready\",\n 'ROLES': 'node',\n 'version': func_info.status.node_info.kubelet_version}})\n else:\n if role == 'true':\n node_status.update({index:{'name': func_info.spec.external_id,\n 'status': \"NotReady\",\n 'ROLES':'master',\n 'version': func_info.status.node_info.kubelet_version}})\n else:\n node_status.update({index:{'name': func_info.spec.external_id,\n 'status': \"NotReady\",\n 'ROLES': 'node',\n 'version': func_info.status.node_info.kubelet_version}})\n Save_Master_Data().update_data(node_status)\n\n\nif __name__==\"__main__\":\n conf = config.load_kube_config()\n client_func = client.CoreV1Api()\n Get_info(conf,client_func)\n\n\n\n", "sub_path": "master_info/get_info.py", "file_name": "get_info.py", "file_ext": "py", "file_size_in_byte": 2200, "program_lang": "python", "lang": "en", 
"doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "master_info.save_to_db.Save_Master_Data", "line_number": 43, "usage_type": "call"}, {"api_name": "kubernetes.config.load_kube_config", "line_number": 47, "usage_type": "call"}, {"api_name": "kubernetes.config", "line_number": 47, "usage_type": "name"}, {"api_name": "kubernetes.client.CoreV1Api", "line_number": 48, "usage_type": "call"}, {"api_name": "kubernetes.client", "line_number": 48, "usage_type": "name"}]} +{"seq_id": "149280490", "text": "from typing import List\n\nfrom django.core.mail import EmailMessage\nfrom django.contrib.auth.models import User\n\nfrom .models import Task\nfrom .tasks import send_email_task\n\n\ndef get_list_of_tasks() -> List[Task]:\n \"\"\"\n Return list of all tasks\n\n :return tasks: list of all Task objects\n :rtype: list\n \"\"\"\n tasks = Task.objects.all()\n return list(tasks)\n\n\ndef get_task_by_id(pk: int) -> Task:\n \"\"\"\n Return Task object by id or raise exceptions\n\n :param pk: id of Task object\n :type pk: int\n\n :return task: Task object\n :rtype: Task\n \"\"\"\n try:\n task = Task.objects.get(id=pk)\n except Task.DoesNotExist as e:\n print(f\"Task object with id {pk} does not exist\")\n raise\n except Exception as e:\n print(f\"An exception {e} was thrown\")\n raise\n\n return task\n\n\ndef execute_task(user, task: Task) -> None:\n \"\"\"\n Execute task by set 'is_executed' to True. If it is already True, change to False.\n Send email to the user about task execution.\n\n :param task: Task object\n :type task: Task\n \"\"\"\n try:\n user_email = user.email\n except Exception:\n user_email = None\n \n if task.is_executed:\n task.is_executed = False\n send_email_task.delay(f\"Task <{task.title}> is unmarked.\", user_email)\n else:\n task.is_executed = True\n send_email_task.delay(f\"Task <{task.title}> is executed.\", user_email)\n task.save()\n", "sub_path": "api/services.py", "file_name": "services.py", "file_ext": "py", "file_size_in_byte": 1454, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "models.Task.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 17, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 10, "usage_type": "name"}, {"api_name": "models.Task.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Task.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 32, "usage_type": "name"}, {"api_name": "models.Task.DoesNotExist", "line_number": 33, "usage_type": "attribute"}, {"api_name": "models.Task", "line_number": 33, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 21, "usage_type": "name"}, {"api_name": "models.Task", "line_number": 43, "usage_type": "name"}, {"api_name": "tasks.send_email_task.delay", "line_number": 58, "usage_type": "call"}, {"api_name": "tasks.send_email_task", "line_number": 58, "usage_type": "name"}, {"api_name": "tasks.send_email_task.delay", "line_number": 61, "usage_type": "call"}, {"api_name": "tasks.send_email_task", "line_number": 61, "usage_type": "name"}]} +{"seq_id": "328485541", "text": "#Einsendeaufgabe 3 - Data Science - Anna Weigand - Python game SNAKE\n\nimport pygame\nimport sys\nimport time\nimport random\n\n#Initialising the 
game\npygame.init()\n\n#Game Surface\n#Each patch is 10 x 10 px -> 70x35 patches\ngameSurface = pygame.display.set_mode((700,350))\npygame.display.set_caption(\"Python Snake Game\")\npygame.time.wait(1000)\n\n#Colors\nblack = pygame.Color(0,0,0)#Background\nwhite = pygame.Color(255,255,255)#Score\ngreen = pygame.Color(173,255,47)#Snake\norange = pygame.Color(255,165,0)#Food\nred = pygame.Color(255,64,64)#GameOver\n\n#FPS Controller\nfpsController = pygame.time.Clock()\n\n#Variables and directions\nclass Snake:\n def __init__(self):\n self.body = [[40,50],[30,50],[20,50]]\n self.position = [50, 50]\n self.direction = \"RIGHT\"\n self.score = 0\n\n def changeDirection(self, dir):\n if dir == 'RIGHT' and not self.direction == 'LEFT':\n self.direction = 'RIGHT'\n if dir == 'LEFT' and not self.direction == 'RIGHT':\n self.direction = 'LEFT'\n if dir == 'UP' and not self.direction == 'DOWN':\n self.direction = 'UP'\n if dir == 'DOWN' and not self.direction == 'UP':\n self.direction = 'DOWN'\n\n def move(self,food):\n if self.direction == 'RIGHT':\n mv = [10,0]\n if self.direction == 'LEFT':\n mv = [-10,0]\n if self.direction == 'UP':\n mv = [0,-10]\n if self.direction == 'DOWN':\n mv = [0,10]\n \n #Change position according to mv\n #Itemwise addition of two lists\n self.position = [self.position[i] + mv[i] for i in range(len(self.position))]\n\n #Check if out of bounds\n w,h = pygame.display.get_surface().get_size()\n if self.position[0]<0 or self.position[0]>= w:\n gameOver(self)\n if self.position[1]<0 or self.position[1]>= h:\n gameOver(self)\n \n #Check if eating myself\n if self.position in self.body:\n gameOver(self)\n\n #Check if snake found food\n if self.position == food.position:\n #If yes: grow (add new position to current body)\n self.body = [self.position] + self.body\n #Increase score\n self.score += 1\n #Regrow food\n food.regrow()\n #If food was generated within existing snake, regrow\n while food.position in self.body:\n food.regrow()\n #TODO check if any fields possible at all!!\n else:\n #... 
just move\n #Append body except last element to self position as new body\n self.body = [self.position] + self.body[:len(self.body)-1]\n\n\nclass Food:\n def __init__(self):\n #Get random position from display size in 10s of pxls\n self.regrow()\n\n def regrow(self):\n #Get random position from display size in 10s of pxls\n w,h = pygame.display.get_surface().get_size()\n self.position = [random.randrange(0, (w / 10)-1)*10,\n random.randrange(0, (h / 10)-1)*10]\n\ndef showScore(snake):\n scoreFont = pygame.font.SysFont('arial',24)\n scoreSurf = scoreFont.render('Score: {0}'.format(snake.score), True, white)\n scoreRect = scoreSurf.get_rect()\n scoreRect.midtop = (80,10)\n gameSurface.blit(scoreSurf, scoreRect)\n\n#Game Over\ndef gameOver(snake):\n myFont = pygame.font.SysFont('arial', 30)\n gameOverSurf = myFont.render(\n \"Snake Game | Your score: \"+str(snake.score)+\" | GAME OVER\",\n True, red)\n gameOverRect = gameOverSurf.get_rect()\n w,h = pygame.display.get_surface().get_size()\n gameOverRect.midtop = (w/2, h/2)\n gameSurface.blit(gameOverSurf, gameOverRect)\n pygame.display.flip()\n time.sleep(3)\n pygame.quit()\n sys.exit()\n\n#Initialising snake and food:\nsn = Snake()\nfd = Food()\n\n#Game Start\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n gameOver(sn)\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_RIGHT:\n sn.changeDirection(\"RIGHT\")\n if event.key == pygame.K_LEFT:\n sn.changeDirection(\"LEFT\")\n if event.key == pygame.K_UP:\n sn.changeDirection(\"UP\")\n if event.key == pygame.K_DOWN:\n sn.changeDirection(\"DOWN\")\n if event.key == pygame.K_ESCAPE:\n pygame.event.post(pygame.event.Event(pygame.QUIT))\n sn.move(fd)\n\n #Clear screen\n gameSurface.fill(black)\n \n #Draw snake\n for position in sn.body:\n pygame.draw.rect(gameSurface,green,\n pygame.Rect(position[0],position[1],10,10))\n \n #Draw food\n pygame.draw.rect(gameSurface,orange,\n pygame.Rect(fd.position[0],fd.position[1],10,10))\n\n #Show Score\n showScore(sn)\n pygame.display.flip()\n fpsController.tick(10)", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 4947, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pygame.init", "line_number": 9, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 14, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pygame.time.wait", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.Color", "line_number": 18, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 19, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 20, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 21, "usage_type": "call"}, {"api_name": "pygame.Color", "line_number": 22, "usage_type": "call"}, {"api_name": "pygame.time.Clock", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 60, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 60, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 95, 
"usage_type": "call"}, {"api_name": "pygame.display", "line_number": 95, "usage_type": "attribute"}, {"api_name": "random.randrange", "line_number": 96, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 97, "usage_type": "call"}, {"api_name": "pygame.font.SysFont", "line_number": 100, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 100, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 108, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 108, "usage_type": "attribute"}, {"api_name": "pygame.display.get_surface", "line_number": 113, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 113, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 116, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 116, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 117, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 118, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 119, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 127, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHT", "line_number": 131, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFT", "line_number": 133, "usage_type": "attribute"}, {"api_name": "pygame.K_UP", "line_number": 135, "usage_type": "attribute"}, {"api_name": "pygame.K_DOWN", "line_number": 137, "usage_type": "attribute"}, {"api_name": "pygame.K_ESCAPE", "line_number": 139, "usage_type": "attribute"}, {"api_name": "pygame.event.post", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.event.Event", "line_number": 140, "usage_type": "call"}, {"api_name": "pygame.QUIT", "line_number": 140, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 148, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 148, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 149, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 152, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pygame.Rect", "line_number": 153, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 157, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 157, "usage_type": "attribute"}]} +{"seq_id": "92628553", "text": "import os\nimport torch\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset, DataLoader\nimport rbm_datasets\nfrom joblib import Parallel, delayed\nimport multiprocessing\nimport utils\nimport copy\nimport pickle\nimport time\n\nfrom rbm import RBM\nfa_dataset = rbm_datasets.FuncDataset(3, lambda A, B, C: [A^B^C, (A&B)|(C&(A^B))], 2**15)\n\ndef train_fa(faRBM, batch_size, itersize):\n ########## CONFIGURATION ##########\n BATCH_SIZE = batch_size\n VISIBLE_UNITS = 5\n HIDDEN_UNITS = 8\n CD_K = 1\n EPOCHS = 10\n LEARN_RATE = 1\n EXIT_ERROR = 1e-10\n CUDA = False\n \n ########## TRAINING FULL ADDER ##########\n print('Training Full Adder Gate...')\n \n arr = []\n fa_errors = []\n epoch_errors = []\n \n \n #initialize rbm and train loader for given hyper parameters\n 
train_loader = torch.utils.data.DataLoader(fa_dataset, batch_size=BATCH_SIZE)\n    \n    #Train RBM and calculate reconstruction errors\n    corr_probs = utils.generateDistribution(3, 2, lambda A, B, C: [A^B^C, (A&B)|(C&(A^B))])\n    \n    t0 = time.time()\n    #Train RBM and calculate reconstruction errors\n    for x in range(itersize):\n        #faRBM.learning_rate=faRBM.learning_rate/5\n        #faRBM.weight_decay=faRBM.weight_decay/5\n        faRBM.k = faRBM.k * 5\n        err = faRBM.train(train_loader, EPOCHS, mult_thresh=1e8, exit_error=EXIT_ERROR)\n        #print(faRBM.weights)\n        fa_errors.append(err)\n        #print('Distance to Target:' + str(utils.EuDist(list(faRBM.generate_statistics(1000).values()), corr_probs)))\n        print('Iteration {0}, batch size {1}, iters {2} complete'.format(x, batch_size, itersize))\n    t1 = time.time()\n    print('total time taken=' + str(t1 - t0) + ' for batchsize:' + str(batch_size) + ' and iters' + str(itersize))\n########## CONFIGURATION ##########\nBATCH_SIZE = 64\nVISIBLE_UNITS = 5\nHIDDEN_UNITS = 8\nCD_K = 1\nEPOCHS = 10\nLEARN_RATE = 1\nEXIT_ERROR = 1e-10\nCUDA = False\n\n########## TRAINING FULL ADDER ##########\n \nbatches = [64]\niters = [5]*20\n\nbatch_rbms = [RBM(int(VISIBLE_UNITS), int(HIDDEN_UNITS), int(CD_K), use_cuda=CUDA, decay_type = 'L2', use_momentum=False, \\\n              learning_rate=LEARN_RATE, weight_decay=1e-4) for _ in range(len(batches))]\nParallel(n_jobs=len(batches))(delayed(train_fa)(batch_rbms[i], batches[i], 5) for i in range(len(batches)))\n\niter_rbms =[RBM(int(VISIBLE_UNITS), int(HIDDEN_UNITS), int(CD_K), use_cuda=CUDA, decay_type = 'L2', use_momentum=False, \\\n             learning_rate=LEARN_RATE, weight_decay=1e-4) for _ in range(len(iters))]\n\nParallel(n_jobs=len(iters))(delayed(train_fa)(iter_rbms[i], 64, iters[i]) for i in range(len(iters)))\n\n#pickle.dump needs an open binary file object, not a filename string\nfor i, batch in enumerate(batch_rbms):\n    with open('faRBMbatchsize{0}.p'.format(batches[i]), 'wb') as f:\n        pickle.dump(batch, f)\n\n\n#save the trained RBMs from the iteration sweep, not the integer counts\nfor i, it in enumerate(iter_rbms):\n    with open('faRBMitersize{0}.p'.format(iters[i]), 'wb') as f:\n        pickle.dump(it, f)\n\n", "sub_path": "LogicRBM/parallel_fa.py", "file_name": "parallel_fa.py", "file_ext": "py", "file_size_in_byte": 2884, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "rbm_datasets.FuncDataset", "line_number": 15, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 37, "usage_type": "attribute"}, {"api_name": "utils.generateDistribution", "line_number": 40, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 53, "usage_type": "call"}, {"api_name": "rbm.RBM", "line_number": 70, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 72, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 72, "usage_type": "call"}, {"api_name": "rbm.RBM", "line_number": 74, "usage_type": "call"}, {"api_name": "joblib.Parallel", "line_number": 77, "usage_type": "call"}, {"api_name": "joblib.delayed", "line_number": 77, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 80, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 84, "usage_type": "call"}]} +{"seq_id": "154811973", "text": "#\n#\n# \n#\n#\n\nimport os\nimport logging\n\nimport dumpsql.dumpsql\n\n#===============================================================================\n# project's settings\n#\n# o for __version__ format string, see https://www.python.org/dev/peps/pep-0440/ :\n#   e.g. 
\"0.1.2.dev1\" or \"0.1a\"\n#\n# o See also https://pypi.python.org/pypi?%3Aaction=list_classifiers\n#\n#===============================================================================\n__projectname__ = \"dysodosdb\"\n__version__ = \"0.0.0\"\n__laststableversion__ = \"0.0.0\"\n__author__ = \"Xavier Faure (suizokukan / 94.23.197.37)\"\n__copyright__ = \"Copyright 2017, suizokukan\"\n__license__ = \"GPL-3.0\"\n__licensepypi__ = 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)'\n__maintainer__ = \"Xavier Faure (suizokukan)\"\n__email__ = \"suizokukan@orange.fr\"\n__status__ = \"Pre-Alpha\"\n__statuspypi__ = 'Development Status :: 2 - Pre-Alpha'\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass DysodosDB(object):\n\n # textdump/db format :\n databaseformat_supported = ('standard',)\n formatversion_supported = (1,)\n\n # tables in the two databases :\n apparatus_tables = ('system', 'theories', 'languages', 'laws')\n verba_tables = ('verba',)\n\n def __init__(self, path=None):\n LOGGER.info(\"DysodosDB (version %s), using DumpSQL (version %s)\",\n __version__,\n dumpsql.dumpsql.__version__)\n LOGGER.info(\"formatversion_supported=%s; \"\n \"tables in apparatus=%s; tables in verba=%s\",\n DysodosDB.formatversion_supported,\n DysodosDB.apparatus_tables,\n DysodosDB.verba_tables)\n\n self.initialized = False\n\n if path is None:\n self.path = os.path.join(os.path.dirname(__file__), \"databases\")\n else:\n self.path = path\n\n # DumpSQL objects :\n self.apparatus = None\n self.verba = None\n\n def close(self):\n LOGGER.debug(\"\")\n self.initialized = False\n\n if self.apparatus is not None:\n self.apparatus.close()\n\n if self.verba is not None:\n self.verba.close()\n\n def dump2db(self):\n self.close()\n\n self.initialized = False\n\n if not self.dump2db_apparatus():\n LOGGER.error(\"can't write 'apparatus' table.\")\n self.initialized = False\n\n if not self.dump2db_verba():\n LOGGER.error(\"can't write 'verba' table.\")\n self.initialized = False\n\n self.initialized = True\n return self\n\n def dump2db_verba(self):\n LOGGER.debug(\"\")\n\n dbname = \"verba\"\n\n dbfilename = os.path.join(self.path, dbname+\".db\")\n LOGGER.info(\"writing %s ...\", dbfilename)\n\n dumpfilenames = []\n for tablename in DysodosDB.verba_tables:\n dumpfilenames.append(os.path.join(self.path, dbname+\".\"+tablename+\".\"+\"textdump\"))\n\n self.verba = dumpsql.dumpsql.DumpSQL(dumpfilenames=dumpfilenames,\n dbfilename=dbfilename).dump2db()\n\n return self.verba.initialized\n\n def dump2db_apparatus(self):\n LOGGER.debug(\"\")\n\n dbname = \"apparatus\"\n\n dbfilename = os.path.join(self.path, dbname+\".db\")\n LOGGER.info(\"writing %s ...\", dbfilename)\n\n dumpfilenames = []\n for tablename in DysodosDB.apparatus_tables:\n dumpfilenames.append(os.path.join(self.path, dbname+\".\"+tablename+\".\"+\"textdump\"))\n\n self.apparatus = dumpsql.dumpsql.DumpSQL(dumpfilenames=dumpfilenames,\n dbfilename=dbfilename).dump2db()\n\n return self.apparatus.initialized\n\n def open_db(self):\n self.close()\n\n dbname = \"apparatus\"\n dbfilename = os.path.join(self.path, dbname+\".db\")\n self.apparatus = dumpsql.dumpsql.DumpSQL(dbfilename=dbfilename).open_db()\n\n dbname = \"verba\"\n dbfilename = os.path.join(self.path, dbname+\".db\")\n self.verba = dumpsql.dumpsql.DumpSQL(dbfilename=dbfilename).open_db()\n\n self.initialized = True\n self.initialized = self.initialized and self.apparatus.initialized\n self.initialized = self.initialized and self.verba.initialized\n\n return self\n", "sub_path": 
"dysodosdb/dysodosdb.py", "file_name": "dysodosdb.py", "file_ext": "py", "file_size_in_byte": 4252, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 33, "usage_type": "call"}, {"api_name": "dumpsql.dumpsql.dumpsql", "line_number": 49, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql", "line_number": 49, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path", "line_number": 59, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 59, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 98, "usage_type": "call"}, {"api_name": "os.path", "line_number": 98, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql.dumpsql.DumpSQL", "line_number": 105, "usage_type": "call"}, {"api_name": "dumpsql.dumpsql.dumpsql", "line_number": 105, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql", "line_number": 105, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 115, "usage_type": "call"}, {"api_name": "os.path", "line_number": 115, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 120, "usage_type": "call"}, {"api_name": "os.path", "line_number": 120, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql.dumpsql.DumpSQL", "line_number": 122, "usage_type": "call"}, {"api_name": "dumpsql.dumpsql.dumpsql", "line_number": 122, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql", "line_number": 122, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql.dumpsql.DumpSQL", "line_number": 132, "usage_type": "call"}, {"api_name": "dumpsql.dumpsql.dumpsql", "line_number": 132, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql", "line_number": 132, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 135, "usage_type": "call"}, {"api_name": "os.path", "line_number": 135, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql.dumpsql.DumpSQL", "line_number": 136, "usage_type": "call"}, {"api_name": "dumpsql.dumpsql.dumpsql", "line_number": 136, "usage_type": "attribute"}, {"api_name": "dumpsql.dumpsql", "line_number": 136, "usage_type": "name"}]} +{"seq_id": "179880459", "text": "import sys\nfrom collections import deque\n\nsys.stdin = open(\"input.txt\",\"r\")\n\ndirection = [(-1,0),(-1,1),(0,1),(1,1),(1,0),(1,-1),(0,-1),(-1,-1)]\n\ndef inRange(a,b):\n if 0<=a= age:\n A[r][c] -= age\n result.append([age+1,1])\n if (age+1) % 5 == 0:\n Fall.append((r,c))\n else:\n result.append([age,0])\n return result\n\ndef summer(r,c,a):\n while a:\n i = a.pop()\n live = i[1]\n age = i[0]\n if live == 0:\n A[r][c] += age // 2\n else:\n a.append([age,live])\n break\ndef fall(Fall):\n for i in Fall:\n for j in direction:\n nextPos = (i[0]+j[0], i[1]+j[1])\n if inRange(nextPos[0],nextPos[1]):\n tree[nextPos[0]][nextPos[1]].appendleft([1,1])\ndef winter():\n for i in range(n):\n for j in range(n):\n plus = A_plus[i][j]\n A[i][j] += plus\n\nfor year in range(k):\n Fall =[]\n #봄 & 여름\n for i in range(n):\n for j in range(n):\n if tree[i][j] != deque():\n result = spring(i,j,tree[i][j])\n tree[i][j] = result\n summer(i,j,tree[i][j])\n #가을\n fall(Fall)\n #겨울\n 
winter()\n\ndef tree_live(a):\n global count\n for i in a:\n if i[1] == 1:\n count+=1\n else:\n return\ncount = 0\nfor i in range(n):\n for j in range(n):\n if tree[i][j] != deque():\n tree_live(tree[i][j])\nprint(count)", "sub_path": "나무 재테크.py", "file_name": "나무 재테크.py", "file_ext": "py", "file_size_in_byte": 2140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.stdin", "line_number": 4, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.stdin.readline", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 15, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 17, "usage_type": "call"}, {"api_name": "sys.stdin.readline", "line_number": 20, "usage_type": "call"}, {"api_name": "sys.stdin", "line_number": 20, "usage_type": "attribute"}, {"api_name": "collections.deque", "line_number": 29, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 69, "usage_type": "call"}, {"api_name": "collections.deque", "line_number": 88, "usage_type": "call"}]} +{"seq_id": "631215370", "text": "import csv\nimport os\nimport io\nimport argparse\nimport datetime\n\ndef init():\n global csv_source_path\n global print_to_screen\n global export_to_file\n global export_dest_path\n global start_exec\n\n start_exec = datetime.datetime.now()\n\n parser = argparse.ArgumentParser(description='PyBank Application')\n parser.add_argument('--csv_source_path',type=str, metavar='', required=False, help='path for source CSV dataset(budget_data.csv)', default='resources/budget_data.csv')\n parser.add_argument('--print_to_screen',type=lambda x: (str(x).lower() in ['true','1', 'yes']), metavar='', required=False, help='print analysis result to screen', default=True)\n parser.add_argument('--export_to_file',type=lambda x: (str(x).lower() in ['true','1', 'yes']), metavar='', required=False, help='export analysis result to file', default=True)\n parser.add_argument('--export_dest_path',type=str, metavar='', required=False, help='path for export analysis file', default='resources/result.txt')\n args = vars(parser.parse_args())\n\n csv_source_path = args['csv_source_path']\n print_to_screen = args['print_to_screen']\n export_to_file = args['export_to_file']\n export_dest_path = args['export_dest_path']\n\n splash_screen()\n\ndef clear():\n os.system('cls' if os.name == 'nt' else 'clear')\n\ndef splash_screen():\n clear()\n \n print (f'''\nPyBank Application - Data Science BootCamp 2019\n\nParameters:\n Path for source CSV dataset.......: {csv_source_path}\n Print analysis result to screen...: {print_to_screen}\n Export analysis result to file....: {export_to_file}\n Path for export analysis file.....: {export_dest_path}\n\n''')\n\ndef perform_analysis():\n profit_lost = []\n amount_change = []\n\n avg_change = 0.0\n total_amount_change = 0.0\n row_offset = 2\n\n analysis_result = io.StringIO() \n\n with open(file=csv_source_path, mode='r', encoding='utf-8') as csv_file:\n csv_reader = csv.DictReader(csv_file, delimiter=',')\n \n for line in csv_reader:\n profit_lost.append(float(line[\"Profit/Losses\"]))\n \n if ( len(profit_lost) > 1):\n # actual - previous\n amount_change.append(tuple([line[\"Date\"], profit_lost[len(profit_lost)-1] - profit_lost[len(profit_lost)-2] ]))\n\n if ( csv_reader.line_num > row_offset):\n total_amount_change = 
sum([pair[1] for pair in amount_change])\n\n            avg_change = total_amount_change / (csv_reader.line_num - row_offset)\n            max_change = max(amount_change, key=lambda item: item[1])\n            min_change = min(amount_change, key=lambda item: item[1])\n            \n            analysis_result.write(\"Financial Analysis\\n\")\n            analysis_result.write(\"--------------------------------\\n\")\n            analysis_result.write(f\"Total Months...................: {len(profit_lost)}\\n\")\n            analysis_result.write(f\"Total..........................: $ {sum(profit_lost):,.2f}\\n\")\n            analysis_result.write(f\"Average Change.................: $ {avg_change:,.2f}\\n\")\n            analysis_result.write (f\"Greatest Increase in Profits...: {max_change[0]} ($ {max_change[1]:,.2f})\\n\")\n            analysis_result.write (f\"Greatest Decrease in Profits...: {min_change[0]} ($ {min_change[1]:,.2f})\\n\")\n\n    return analysis_result.getvalue()\n\ndef perform_output(result):\n    if ( print_to_screen == True):\n        print (result)\n    \n    if ( export_to_file == True):\n        with open(export_dest_path, \"w\") as f:\n            f.writelines(result)\n\ndef end():\n    seconds_elapsed = (datetime.datetime.now() - start_exec).total_seconds()\n    print (f\"Process completed in {seconds_elapsed} second(s)\")\n", "sub_path": "Unit3/PyBank/functions.py", "file_name": "functions.py", "file_ext": "py", "file_size_in_byte": 3711, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 14, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 14, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "os.system", "line_number": 31, "usage_type": "call"}, {"api_name": "os.name", "line_number": 31, "usage_type": "attribute"}, {"api_name": "io.StringIO", "line_number": 55, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 93, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 93, "usage_type": "attribute"}]} +{"seq_id": "412145458", "text": "import os\nfrom .data import GRID\nfrom .helpers import *\n\nclass MQL(object):\n    def __init__(self, mqlDir, mqlName, tfFeatures, tm):\n        self.mqlDir = mqlDir\n        #assign tm first: it is needed for error reporting just below\n        self.tm = tm\n        cleanDb = cleanName(mqlName)\n        if cleanDb != mqlName:\n            self.tm.error('db name \"{}\" => \"{}\"'.format(mqlName, cleanDb))\n        self.mqlName = cleanDb\n        self.tfFeatures = tfFeatures\n        self._check()\n\n    def write(self):\n        if not self.good: return\n        if not os.path.exists(self.mqlDir):\n            try:\n                os.makedirs(self.mqlDir, exist_ok=True)\n            except:\n                self.tm.error('Cannot create directory \"{}\"'.format(self.mqlDir))\n                self.good = False\n                return\n        mqlPath = '{}/{}.mql'.format(self.mqlDir, self.mqlName)\n        try:\n            fm = open(mqlPath, 'w', encoding='utf8')\n        except:\n            self.tm.error('Could not write to {}'.format(mqlPath))\n            self.good = False\n            return\n        self.fm = fm\n        self._writeStartDb()\n        self._writeTypes()\n        self._writeDataAll()\n        self._writeEndDb()\n        self.tm.indent(level=0)\n        self.tm.info('Done')\n\n    def _check(self):\n        self.tm.info('Checking features of dataset {}'.format(self.mqlName))\n        self.features = {}\n        self.featureList = []\n        self.tm.indent(level=1)\n        for (f, fo) in sorted(self.tfFeatures.items()):\n            if fo.method is not None or f in GRID: continue\n            fo.load(metaOnly=True)\n            if fo.isConfig: continue\n            cleanF = cleanName(f)\n            if cleanF != f:\n                self.tm.error('feature \"{}\" => \"{}\"'.format(f, cleanF))\n            
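#from here on, use the cleaned-up feature name\n            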
self.featureList.append(cleanF)\n self.features[cleanF] = fo\n good = True\n for feat in (GRID[0], GRID[1], '__levels__'):\n if feat not in self.tfFeatures:\n self.tm.error('{} feature {} is missing from data set'.format(\n 'Grid' if feat in GRID else 'Computed' if feat.startswith('__') else 'Data',\n feat,\n ))\n good = False\n else:\n fObj = self.tfFeatures[feat]\n if not fObj.load():\n good = False\n self.tm.indent(level=0)\n if (not good):\n self.tm.error('Export to MQL aborted')\n else:\n self.tm.info('{} features to export to MQL ...'.format(len(self.featureList)))\n self.good = good\n\n def _writeStartDb(self):\n self.fm.write('''\nCREATE DATABASE '{name}'\nGO\nUSE DATABASE '{name}'\nGO\n'''.format(name=self.mqlName))\n\n\n def _writeEndDb(self):\n self.fm.write('''\nVACUUM DATABASE ANALYZE\nGO\n''')\n self.fm.close()\n\n def _writeTypes(self):\n def valInt(n): return str(n)\n def valStr(s):\n if \"'\" in s:\n return '\"{}\"'.format(s.replace('\"', '\\\\\"'))\n else:\n return \"'{}'\".format(s)\n def valIds(ids): return '({})'.format(','.join(str(i) for i in ids))\n\n self.levels = self.tfFeatures['__levels__'].data[::-1]\n self.tm.info('Loading {} features'.format(len(self.featureList)))\n for ft in self.featureList:\n fObj = self.features[ft]\n fObj.load()\n\n self.tm.indent(level=0)\n self.tm.info('Mapping {} features onto {} object types'.format(\n len(self.featureList), len(self.levels),\n ))\n otypeSupport = {}\n for (otype, av, start, end) in self.levels:\n cleanOtype = cleanName(otype)\n if cleanOtype != otype:\n self.tm.error('otype \"{}\" => \"{}\"'.format(otype, cleanOtype))\n otypeSupport[cleanOtype] = set(range(start, end+1))\n\n self.otypes = {}\n self.featureTypes = {}\n self.featureMethods = {}\n for ft in self.featureList:\n fObj = self.features[ft]\n if fObj.isEdge:\n dataType = 'LIST OF id_d'\n method = valIds\n else:\n if fObj.dataType == 'str':\n dataType = 'string DEFAULT \"\"'\n method = valStr\n elif fObj.dataType == 'int':\n dataType = 'integer DEFAULT 0'\n method = valInt\n else:\n dataType = 'string DEFAULT \"\"'\n method = valStr\n self.featureTypes[ft] = dataType\n self.featureMethods[ft] = method\n\n support = set(fObj.data.keys())\n for otype in otypeSupport:\n if len(support & otypeSupport[otype]):\n self.otypes.setdefault(otype, []).append(ft)\n\n for otype in (cleanName(x[0]) for x in self.levels):\n self._writeType(otype)\n\n def _writeType(self, otype):\n self.fm.write('''\nCREATE OBJECT TYPE\n[{}\n'''.format(otype))\n for ft in self.otypes[otype]:\n self.fm.write(' {}:{};\\n'.format(ft, self.featureTypes[ft]))\n self.fm.write('''\n]\nGO\n''')\n\n def _writeDataAll(self):\n self.tm.info('Writing {} features as data in {} object types'.format(\n len(self.featureList), len(self.levels),\n ))\n self.oslots = self.tfFeatures[GRID[1]].data\n for (otype, av, start, end) in self.levels:\n self._writeData(otype, start, end)\n\n def _writeData(self, otype, start, end):\n tm = self.tm\n fm = self.fm\n tm.indent(level=1, reset=True)\n tm.info('{} data ...'.format(otype))\n oslots = self.oslots\n maxSlot = oslots[-1]\n oFeats = self.otypes[otype]\n features = self.features\n featureTypes = self.featureTypes\n featureMethods = self.featureMethods\n fm.write('''\nDROP INDEXES ON OBJECT TYPE[{o}]\nGO\nCREATE OBJECTS\nWITH OBJECT TYPE[{o}]\n'''.format(o=otype))\n curSize = 0\n LIMIT = 50000\n t = 0\n j = 0\n tm.indent(level=2, reset=True)\n for n in range(start, end + 1):\n oMql = '''\nCREATE OBJECT\nFROM MONADS= {{ {m} }} \nWITH ID_D={i} [\n'''.format(\n m=n if 
n <= maxSlot else specFromRanges(rangesFromList(oslots[n-maxSlot-1])),\n i=n,\n)\n for ft in oFeats:\n tp = featureTypes[ft]\n method = featureMethods[ft]\n fMap = features[ft].data\n if n in fMap:\n oMql += '{}:={};\\n'.format(ft, method(fMap[n]))\n oMql += '''\n]\n'''\n fm.write(oMql)\n curSize += len(bytes(oMql, encoding='utf8'))\n t += 1\n j += 1\n if j == LIMIT:\n fm.write('''\nGO\nCREATE OBJECTS\nWITH OBJECT TYPE[{o}]\n'''.format(o=otype))\n tm.info('batch of size {:>20} with {:>7} of {:>7} {}s'.format(nbytes(curSize), j, t, otype))\n j = 0\n curSize = 0\n\n tm.info('batch of size {:>20} with {:>7} of {:>7} {}s'.format(nbytes(curSize), j, t, otype))\n fm.write('''\nGO\nCREATE INDEXES ON OBJECT TYPE[{o}]\nGO\n'''.format(o=otype))\n\n tm.indent(level=1)\n tm.info('{} data: {} objects'.format(otype, t))\n", "sub_path": "tf/mql.py", "file_name": "mql.py", "file_ext": "py", "file_size_in_byte": 7187, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.exists", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 20, "usage_type": "call"}, {"api_name": "data.GRID", "line_number": 46, "usage_type": "name"}, {"api_name": "data.GRID", "line_number": 55, "usage_type": "name"}, {"api_name": "data.GRID", "line_number": 58, "usage_type": "name"}, {"api_name": "data.GRID", "line_number": 160, "usage_type": "name"}]} +{"seq_id": "427154145", "text": "from rest_framework import renderers, status\n\nfrom manager.testing import DatabaseTestCase\n\n\nclass NodeViewsTest(DatabaseTestCase):\n \"\"\"Test creating and retrieving nodes.\"\"\"\n\n # Type specific CRUD methods for Nodes\n\n def create_node(self, user=None, project=None, node=None):\n return self.create(\n user,\n \"nodes\",\n data={\"project\": project, \"node\": node},\n headers={\"HTTP_ACCEPT\": \"application/json\"},\n )\n\n def retrieve_json(self, user, key):\n return self.retrieve(\n user,\n \"nodes\",\n kwargs={\"key\": key},\n headers={\"HTTP_ACCEPT\": \"application/json\"},\n )\n\n def retrieve_html(self, user, key):\n return self.retrieve(\n user, \"nodes\", kwargs={\"key\": key}, headers={\"HTTP_ACCEPT\": \"text/html\"},\n )\n\n # Testing methods\n\n def test_create_ok(self):\n node = {\"type\": \"CodeChunk\", \"text\": \"plot(1, 1)\"}\n\n response = self.create(self.ada, \"nodes\", {\"node\": node})\n assert response.status_code == status.HTTP_201_CREATED\n\n response = self.create(\n self.ada, \"nodes\", {\"node\": node, \"project\": self.ada_private.id}\n )\n assert response.status_code == status.HTTP_201_CREATED\n\n response = self.create(\n self.ada,\n \"nodes\",\n {\n \"node\": node,\n \"project\": self.ada_public.id,\n \"app\": \"an-app\",\n \"host\": \"http://example.org\",\n },\n )\n assert response.status_code == status.HTTP_201_CREATED\n\n response = self.create_node(self.ada, self.ada_public.id, node)\n assert response.status_code == status.HTTP_201_CREATED\n assert response.data[\"key\"] is not None\n assert response.data[\"url\"] is not None\n\n def test_create_must_be_authenticated(self):\n response = self.create_node()\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_create_must_have_project_edit_permission(self):\n response = self.create_node(self.ada, self.bob_private.id, 42)\n assert response.status_code == status.HTTP_403_FORBIDDEN\n\n def test_create_must_have_required_fields(self):\n response = self.create(\n 
self.ada, \"nodes\", headers={\"HTTP_ACCEPT\": \"application/json\"}\n )\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.data == {\n \"message\": \"Invalid input.\",\n \"errors\": [{\"field\": \"node\", \"message\": \"This field is required.\"}],\n }\n\n def test_create_host_must_be_url(self):\n response = self.create(\n self.ada,\n \"nodes\",\n data={\"node\": 41, \"host\": \"foo\"},\n headers={\"HTTP_ACCEPT\": \"application/json\"},\n )\n assert response.status_code == status.HTTP_400_BAD_REQUEST\n assert response.data == {\n \"message\": \"Invalid input.\",\n \"errors\": [{\"field\": \"host\", \"message\": \"Enter a valid URL.\"}],\n }\n\n def test_retrieve_json_ok(self):\n key = self.create_node(self.ada, self.ada_public.id, 42).data[\"key\"]\n response = self.retrieve_json(self.ada, key)\n assert response.status_code == status.HTTP_200_OK\n assert response.data[\"creator\"] is self.ada.id\n assert response.data[\"created\"] is not None\n assert response.data[\"project\"] == self.ada_public.id\n assert response.data[\"key\"] == key\n assert response.data[\"node\"] == 42\n\n def test_retrieve_json_is_unmodified(self):\n \"\"\"Test that there is no modification to the node's JSON.\"\"\"\n inp = {\"property_a\": 1, \"property2\": {\"camelCased\": None}}\n key = self.create_node(self.ada, None, inp).data[\"key\"]\n out = self.retrieve_json(self.ada, key).data[\"node\"]\n assert out == inp\n\n def test_retrieve_json(self):\n key = self.create_node(self.ada, self.ada_public.id, \"A\").data[\"key\"]\n response = self.retrieve_json(None, key)\n assert response.status_code == status.HTTP_200_OK\n\n def test_retrieve_json_when_no_project(self):\n key = self.create_node(self.ada, None, \"A\").data[\"key\"]\n\n for user in (self.ada, self.bob):\n response = self.retrieve_json(self.ada, key)\n assert response.status_code == status.HTTP_200_OK\n\n def test_retrieve_html_ok(self):\n key = self.create_node(self.ada, self.ada_private.id, \"C\").data[\"key\"]\n response = self.retrieve_html(self.ada, key)\n assert response.status_code == status.HTTP_200_OK\n assert isinstance(response.accepted_renderer, renderers.TemplateHTMLRenderer)\n assert response.template_name == \"projects/nodes/retrieve.html\"\n assert response.data.get(\"html\") is not None\n\n def test_retrieve_anything(self):\n \"\"\"Test that if Accept:*/* or no Accept header that get HTML.\"\"\"\n key = self.create_node(self.ada, self.ada_private.id, \"A node\").data[\"key\"]\n\n response = self.retrieve(self.ada, \"nodes\", kwargs={\"key\": key})\n assert response.status_code == status.HTTP_200_OK\n assert isinstance(response.accepted_renderer, renderers.TemplateHTMLRenderer)\n\n response = self.retrieve(\n self.ada, \"nodes\", kwargs={\"key\": key}, headers={\"HTTP_ACCEPT\": \"*/*\"}\n )\n assert response.status_code == status.HTTP_200_OK\n assert isinstance(response.accepted_renderer, renderers.TemplateHTMLRenderer)\n\n def test_retrieve_html_when_no_project(self):\n \"\"\"Test that everyone gets complete view for any node with no project specified.\"\"\"\n key = self.create_node(self.ada, None, \"A\").data[\"key\"]\n\n for user in (self.ada, self.bob, None):\n response = self.retrieve_html(user, key)\n assert response.template_name == \"projects/nodes/retrieve.html\"\n", "sub_path": "manager/projects/api/views/nodes_tests.py", "file_name": "nodes_tests.py", "file_ext": "py", "file_size_in_byte": 5883, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", 
"api": [{"api_name": "manager.testing.DatabaseTestCase", "line_number": 6, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 38, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 38, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 43, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 55, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 55, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_201_CREATED", "line_number": 58, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 58, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 64, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 64, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_403_FORBIDDEN", "line_number": 68, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 68, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 74, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 74, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_400_BAD_REQUEST", "line_number": 87, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 87, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 96, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 96, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 113, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 113, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 120, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 120, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 125, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 125, "usage_type": "name"}, {"api_name": "rest_framework.renderers.TemplateHTMLRenderer", "line_number": 126, "usage_type": "attribute"}, {"api_name": "rest_framework.renderers", "line_number": 126, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 135, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 135, "usage_type": "name"}, {"api_name": "rest_framework.renderers.TemplateHTMLRenderer", "line_number": 136, "usage_type": "attribute"}, {"api_name": "rest_framework.renderers", "line_number": 136, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 141, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 141, "usage_type": "name"}, {"api_name": "rest_framework.renderers.TemplateHTMLRenderer", "line_number": 142, "usage_type": "attribute"}, {"api_name": "rest_framework.renderers", "line_number": 142, "usage_type": "name"}]} +{"seq_id": "315395663", "text": "import time\nstart_time = time.time()\nimport pandas as pd\n\nkeys = ['scada', 'interconnectors', 'dispatch_price']\n\nfilepath = '/Users/luka/Documents/Programming/Tempus/Python/Carbon_Intensity/Files/'\n\ndfs = {key: pd.read_csv(filepath+'mms_crop/'+key+'.csv', index_col='Timestamp') for key in 
keys}\ndfs.update({'emissions': pd.read_csv(filepath+'Emission_Factor.csv')})\nprint('import data into dictionaries time', time.time() - start_time)\n\n#emissions factor\ndfs['emissions'].rename(index=str, columns={\"CO2E_EMISSIONS_FACTOR\": \"CO2e_Emissions_Factor (t Co2e/ MWh)\", \"CO2E_ENERGY_SOURCE\": \"Energy Source\"}, inplace=True)\ndf_emissions = dfs['emissions'].loc[:,['DUID', 'CO2e_Emissions_Factor (t Co2e/ MWh)']]\nEmission_Factor_DUIDs = list(dfs['emissions'].loc[:, 'DUID'])\n\n#SA interconnectors\nSA_interconnectors = dfs['interconnectors'].loc[:,['METEREDMWFLOW V-S-MNSP1','METEREDMWFLOW V-SA']]\n\nSA_DUIDs = ['AGLHAL', 'ANGAST1', 'BLUFF1', 'BNGSF1', 'BOLIVAR1', 'CATHROCK', 'CLEMGPWF',\n 'CNUNDAWF', 'DALNTH01', 'DALNTHL1', 'DRYCGT1', 'DRYCGT2', 'DRYCGT3', 'HALLWF1',\n 'HALLWF2', 'HDWF1', 'HDWF2', 'HDWF3', 'HPRG1', 'HPRL1', 'LADBROK1', 'LADBROK2',\n 'LKBONNY1', 'LKBONNY2', 'LKBONNY3', 'LONSDALE', 'MINTARO', 'MINTNL1', 'MTMILLAR',\n 'NBHWF1', 'OSB-AG', 'POR01', 'POR03', 'PPCCGT', 'PTSTAN1', 'QPS1', 'QPS2','QPS3',\n 'QPS4', 'SATGN1', 'SATGS1', 'SNOWNTH1', 'SNOWSTH1', 'SNOWTWN1', 'SNUG1',\n 'STARHLWF', 'TATIARA1', 'TORRA1', 'TORRA2', 'TORRA3', 'TORRA4', 'TORRB1', 'TORRB2',\n 'TORRB3', 'TORRB4', 'WATERLWF', 'WINGF1_1', 'WINGF2_1', 'WPWF']\n\nVIC_DUIDs = ['AGLSOM', 'ARWF1','BALDHWF1', 'BAPS', 'BBASEHOS', 'BDL01', 'BDL02', 'BLNKVIC',\n 'BROADMDW', 'BROOKLYN', 'CBWF1', 'CHPSTWF1', 'CLAYTON', 'CLOVER', 'CODRNGTON',\n 'CORIO1', 'DARTM1', 'EILDON1', 'EILDON2', 'EILDON3', 'GANNSF1', 'GLENMAG1',\n 'HALAMRD1', 'HEPWIND1', 'HLMSEW01', 'HUMEV', 'JLA01', 'JLA02', 'JLA03', 'JLA04',\n 'JLB01', 'JLB02', 'JLB03', 'KIATAWF1', 'LNGS1', 'LNGS2', 'LOYYB1', 'LOYYB2', 'LYA1',\n 'LYA2', 'LYA3', 'LYA4', 'MACARTH1', 'MAROOWF1', 'MCKAY1', 'MERCER01', 'MLWF1',\n 'MORNW', 'MORTLK11', 'MORTLK12', 'MTGELWF1', 'MURRAY', 'NPS', 'OAKLAND1', 'RUBICON',\n 'SALTCRK1', 'SHEP1', 'SVALE1', 'TATURA01', 'TGNSS1', 'TOORAWF', 'VPGS1', 'VPGS2',\n 'VPGS3', 'VPGS4', 'VPGS5', 'VPGS6', 'WAUBRAWF', 'WILLHOV1', 'WKIEWA1', 'WKIEWA2',\n 'WOLLERT1', 'WONWP', 'WYNDW', 'YAMBUKWF', 'YSWF1', 'YWNGAHYD', 'YWPS1', 'YWPS2',\n 'YWPS3', 'YWPS4']\n\nSA_Common_DUIDs = list( set(Emission_Factor_DUIDs) - (set(Emission_Factor_DUIDs)- set(SA_DUIDs)))\nSA_Common_DUIDs.sort()\n\n#\n# South Australia\n#\n\n# emissions\nlist_emissions = []\nfor element in SA_Common_DUIDs:\n try:\n emission_factor = (df_emissions.loc[df_emissions['DUID'] == element].iloc[:,1]).values\n holding_emissions = dfs['scada'].loc[:,['SCADAVALUE '+ element]] * emission_factor * (5/60)\n final_emissions = holding_emissions.rename(columns = { 'SCADAVALUE ' + element : element + ' Emissions (t CO2e)' } )\n list_emissions.append(final_emissions)\n except KeyError:\n print('the following generator', element,'does not appear in the scada values')\n #some generators do not seem to be in the scada list, further to this\n #certain generators seem to have duplicate values for emission factor,\n #sort this out in emission factor scraping script\nDF_SA_Emissions = pd.concat(list_emissions, axis=1)\n\n# energy\n\nlist_energy = []\nfor element in SA_Common_DUIDs:\n try:\n holding_energy = dfs['scada'].loc[:,['SCADAVALUE '+ element]] * (5/60)\n final_energy = holding_energy.rename(columns = { 'SCADAVALUE ' + element : element + ' Energy (MWh)' } )\n list_energy.append(final_energy)\n except KeyError:\n print('the following generator', element,'does not appear in the scada values')\n #some generators do not seem to be in the scada list, further to this\n #certain generators seem to have 
duplicate values for emission factor,\n #sort this out in emission factor scraping script\nDF_SA_Energy = pd.concat(list_energy, axis=1)\n\nTotal_Energy = DF_SA_Energy.sum(axis=1)\nDF_SA_Energy['Total Energy (MWh)'] = Total_Energy\n\nTotal_Emissions = DF_SA_Emissions.sum(axis=1)\nDF_SA_Emissions['Total Emissions (t CO2e)'] = Total_Emissions\n\nCarbon_Intensity_MWh = Total_Emissions/Total_Energy\n# units of t CO2e / MWh\n\nCarbon_Intensity = Carbon_Intensity_MWh * 1000\n# times by 1000 to go from units of t CO2e/MWh to g CO2e/kWh\n\nMean = Carbon_Intensity.mean()\nmean = round(Mean,1)\nMax = Carbon_Intensity.max()\nmax = round(Max,1)\nMin = Carbon_Intensity.min()\nmin = round(Min,1)\n\nprint('The maximum intensity is', max,'g CO2e/kWh, the minimum intensity is', min,'g CO2e/kWh and the mean intensity is', mean, 'g CO2e/kWh.')\n\nDF_Carbon_Intensity = Carbon_Intensity.to_frame('Carbon Intensity (g CO2e/kWh)')\n# convert to a DataFrame and add a title to the column\n\nDF_Carbon_Intensity.index = pd.to_datetime(DF_Carbon_Intensity.index)\nDF_Carbon_Intensity = DF_Carbon_Intensity.resample('H').mean()\n\nDF_Carbon_Intensity.to_csv('/Users/luka/Documents/Programming/Tempus/Python/Carbon_Intensity/Files/Prescraped_SA_Carbon_Intensity.csv')\n\nimport matplotlib.pyplot as plt\n\nstart = Carbon_Intensity.index[0]\nend = Carbon_Intensity.index[len(Carbon_Intensity)-1]\n\nDF_Carbon_Intensity.plot(title='South Australia Carbon Intensity')\nplt.ylabel('Carbon Intensity (g CO2e/kWh)')\nplt.xlabel('')\nplt.margins(y=0.1)\nplt.tight_layout()\nplt.hlines(mean, start, end)\n", "sub_path": "Carbon_Intensity/mms_calculation.py", "file_name": "mms_calculation.py", "file_ext": "py", "file_size_in_byte": 5615, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.time", "line_number": 2, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 9, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 10, "usage_type": "call"}, {"api_name": "time.time", "line_number": 11, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 62, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 77, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 103, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 114, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 114, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 115, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 115, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.margins", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hlines", "line_number": 118, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 118, "usage_type": "name"}]} +{"seq_id": "47468500", "text": "from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'reixeta.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\turl(r'^$', 'principal.views.index', name=\"index\"),\n\turl(r'^es/$', 'principal.views.index_es', 
name=\"es\"),\n\turl(r'^es/exitos/','principal.views.exitos',name=\"exitos\"),\n\turl(r'^exits','principal.views.exits',name=\"exits\"),\n url(r'^admin/', include(admin.site.urls)),\n)\n", "sub_path": "reixeta/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 497, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.patterns", "line_number": 4, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 8, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 12, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "548209333", "text": "#!/usr/bin/env python3\nimport wx\n\nclass Example(wx.Frame):\n def __init__(self, *args, **kw):\n super(Example, self).__init__(*args, **kw)\n self.InitUI()\n \n def OnClose(self, e):\n self.Close(True)\n\n def InitUI(self):\n pnl = wx.Panel(self)\n cbnt = wx.Button(pnl, label='Close', pos=(20, 30))\n cbnt.Bind(wx.EVT_BUTTON, self.OnClose)\n self.SetSize((300, 300))\n self.SetTitle('wx.Button')\n self.Centre()\n self.Show(True)\n\n wx.StaticBox(pnl, pos=(95,135), size=(110,120))\n \n light = wx.ToggleButton(pnl, label = 'red', pos=(20,80))\n self.cpnl = wx.Panel(pnl, pos=(100,150), size = (100,100))\n light.Bind(wx.EVT_TOGGLEBUTTON, self.ToggleLight)\n\n\n def ToggleLight(self,e):\n obj = e.GetEventObject()\n isPressed = obj.GetValue()\n if isPressed:\n self.cpnl.SetBackgroundColour((128,0,0))\n else:\n self.cpnl.SetBackgroundColour((255,255,255))\n\ndef main():\n app = wx.App()\n Example(None)\n app.MainLoop()\n\nif __name__ == '__main__':\n main()", "sub_path": "4th_PYTHON/6_lecture2.py", "file_name": "6_lecture2.py", "file_ext": "py", "file_size_in_byte": 1105, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "wx.Frame", "line_number": 4, "usage_type": "attribute"}, {"api_name": "wx.Panel", "line_number": 13, "usage_type": "call"}, {"api_name": "wx.Button", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.EVT_BUTTON", "line_number": 15, "usage_type": "attribute"}, {"api_name": "wx.StaticBox", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.ToggleButton", "line_number": 23, "usage_type": "call"}, {"api_name": "wx.Panel", "line_number": 24, "usage_type": "call"}, {"api_name": "wx.EVT_TOGGLEBUTTON", "line_number": 25, "usage_type": "attribute"}, {"api_name": "wx.App", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "129611926", "text": "from flask import Blueprint, render_template, request, redirect, url_for\nfrom crm import db\nfrom crm.clientes.models import Cliente\nfrom crm.projetos.models import Projeto\nfrom datetime import *\nfrom sqlalchemy.sql import func\n\nhome = Blueprint('home', __name__, template_folder=\"templates\")\n\n@home.route(\"/index\", methods=['GET', 'POST'])\n@home.route('/', methods=['GET', 'POST'])\ndef index():\n\n clientes = Cliente.query.all()\n if request.method == 'POST' and \"nome\" in request.form:\n nome = 
request.form['nome']\n email = request.form['email']\n telefone = request.form['telefone']\n status = request.form['status']\n endereco = request.form['endereco']\n datainsercao = request.form['datainsercao']\n prospeccao = request.form.get('prospeccao')\n\n clientes = Cliente(nome = nome,\n email = email,\n telefone = telefone,\n status = status,\n endereco = endereco,\n datainsercao = datainsercao,\n prospeccao = prospeccao)\n\n db.session.add(clientes)\n db.session.commit()\n\n return redirect(url_for('home.index'))\n\n projetos = Projeto.query.all()\n projetos.sort(key=lambda x: x.data, reverse=False)\n listadoida = list()\n \n for c in clientes:\n d1 = datetime.today()\n d2 = datetime.strptime(c.datainsercao, '%Y-%m-%d')\n\n delta = abs((d1 - d2).days)\n \n if delta <= 5:\n listadoida.append(c)\n\n totprojetos = int(0)\n \n for p in projetos:\n if p.status != \"Cancelado\":\n totprojetos += 1\n else:\n pass\n \n faturamento = Projeto.query.with_entities(func.sum(Projeto.valor)).all()\n\n media = Projeto.query.with_entities(func.avg(Projeto.nota)).all()\n \n data = date.today()\n ano = data.strftime('%Y')\n\n if request.method == 'POST' and \"nomeprojeto\" in request.form:\n projetos = Projeto(request.form['nomeprojeto'], \n request.form['tipo'], \n request.form['cliente'], \n request.form['url'], \n request.form['valor'], \n request.form['po'], \n request.form['prazo'], \n request.form['inicio'], \n request.form['status'],\n request.form['nota'])\n \n db.session.add(projetos)\n db.session.commit()\n \n \n return redirect(url_for('home.index'))\n\n \n\n return render_template('index.html', clientes=clientes, \n projetos=projetos, \n faturamento=faturamento,\n media=media,\n ano=ano,\n totprojetos=totprojetos,\n listadoida=listadoida)", "sub_path": "crm/home/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 3095, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Blueprint", "line_number": 8, "usage_type": "call"}, {"api_name": "crm.clientes.models.Cliente.query.all", "line_number": 14, "usage_type": "call"}, {"api_name": "crm.clientes.models.Cliente.query", "line_number": 14, "usage_type": "attribute"}, {"api_name": "crm.clientes.models.Cliente", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.method", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 18, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 18, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 19, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 19, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 20, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 20, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 21, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 21, "usage_type": "name"}, {"api_name": "flask.request.form.get", "line_number": 22, 
"usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 22, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 22, "usage_type": "name"}, {"api_name": "crm.clientes.models.Cliente", "line_number": 24, "usage_type": "call"}, {"api_name": "crm.db.session.add", "line_number": 32, "usage_type": "call"}, {"api_name": "crm.db.session", "line_number": 32, "usage_type": "attribute"}, {"api_name": "crm.db", "line_number": 32, "usage_type": "name"}, {"api_name": "crm.db.session.commit", "line_number": 33, "usage_type": "call"}, {"api_name": "crm.db.session", "line_number": 33, "usage_type": "attribute"}, {"api_name": "crm.db", "line_number": 33, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 35, "usage_type": "call"}, {"api_name": "crm.projetos.models.Projeto.query.all", "line_number": 37, "usage_type": "call"}, {"api_name": "crm.projetos.models.Projeto.query", "line_number": 37, "usage_type": "attribute"}, {"api_name": "crm.projetos.models.Projeto", "line_number": 37, "usage_type": "name"}, {"api_name": "datetime.today", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.strptime", "line_number": 43, "usage_type": "call"}, {"api_name": "crm.projetos.models.Projeto.query.with_entities", "line_number": 58, "usage_type": "call"}, {"api_name": "crm.projetos.models.Projeto.query", "line_number": 58, "usage_type": "attribute"}, {"api_name": "crm.projetos.models.Projeto", "line_number": 58, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.func.sum", "line_number": 58, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 58, "usage_type": "name"}, {"api_name": "crm.projetos.models.Projeto.valor", "line_number": 58, "usage_type": "attribute"}, {"api_name": "crm.projetos.models.Projeto.query.with_entities", "line_number": 60, "usage_type": "call"}, {"api_name": "crm.projetos.models.Projeto.query", "line_number": 60, "usage_type": "attribute"}, {"api_name": "crm.projetos.models.Projeto", "line_number": 60, "usage_type": "name"}, {"api_name": "sqlalchemy.sql.func.avg", "line_number": 60, "usage_type": "call"}, {"api_name": "sqlalchemy.sql.func", "line_number": 60, "usage_type": "name"}, {"api_name": "crm.projetos.models.Projeto.nota", "line_number": 60, "usage_type": "attribute"}, {"api_name": "flask.request.method", "line_number": 65, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 65, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 65, "usage_type": "attribute"}, {"api_name": "crm.projetos.models.Projeto", "line_number": 66, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 66, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 67, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 68, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 68, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 69, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 69, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 70, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 70, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 71, "usage_type": "attribute"}, 
{"api_name": "flask.request", "line_number": 71, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 72, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 73, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 73, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 74, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 74, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 75, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 75, "usage_type": "name"}, {"api_name": "crm.db.session.add", "line_number": 77, "usage_type": "call"}, {"api_name": "crm.db.session", "line_number": 77, "usage_type": "attribute"}, {"api_name": "crm.db", "line_number": 77, "usage_type": "name"}, {"api_name": "crm.db.session.commit", "line_number": 78, "usage_type": "call"}, {"api_name": "crm.db.session", "line_number": 78, "usage_type": "attribute"}, {"api_name": "crm.db", "line_number": 78, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 81, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "390926522", "text": "from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth.models import User\nfrom account.models import Contact\nfrom account.forms import YueeeeeeeeUserCreationForm\n\n# Create your views here.\ndef dashboard(request):\n return render(\n request,\n 'account/dashboard.html'\n )\n\ndef user_list(request):\n user_list = User.objects.all()\n return render(\n request,\n 'account/user_list.html',\n {'user_list': user_list}\n )\n\ndef register(request):\n if request.method == 'POST':\n user_form = YueeeeeeeeUserCreationForm(request.POST)\n if user_form.is_valid():\n new_user = user_form.save(commit=False)\n new_user.set_password(user_form.cleaned_data['password'])\n new_user.save()\n return render(\n request,\n 'account/register_done.html',\n {'new_user': new_user}\n )\n else:\n user_form = YueeeeeeeeUserCreationForm()\n return render(\n request,\n 'account/register.html',\n {'user_form': user_form}\n )\n\n@login_required\ndef profile(request, username):\n user = User.objects.filter(username=username).first()\n return render(\n request,\n {'user': user}\n )\n\n@login_required\ndef follow(request, username):\n user_following = request.user\n user_followed = User.objects.filter(username=username).first()\n con = Contact(user_following=user_following, user_followed=user_followed)\n con.save()\n return redirect('account:profile', username=username)\n\n@login_required\ndef unfollow(request, username):\n user_following = request.user\n user_followed = User.objects.filter(username=username).first()\n con = Contact.objects.filter(user_following=user_following, user_followed=user_followed).first()\n if con:\n con.delete()\n return redirect('account:profile', username=username)", "sub_path": "account/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2067, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.shortcuts.render", "line_number": 11, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.models.User.objects.all", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 17, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 18, "usage_type": "call"}, {"api_name": "account.forms.YueeeeeeeeUserCreationForm", "line_number": 26, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 31, "usage_type": "call"}, {"api_name": "account.forms.YueeeeeeeeUserCreationForm", "line_number": 37, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 38, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 46, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 46, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 47, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 44, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 55, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 55, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 55, "usage_type": "name"}, {"api_name": "account.models.Contact", "line_number": 56, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 52, "usage_type": "name"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 63, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 63, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 63, "usage_type": "name"}, {"api_name": "account.models.Contact.objects.filter", "line_number": 64, "usage_type": "call"}, {"api_name": "account.models.Contact.objects", "line_number": 64, "usage_type": "attribute"}, {"api_name": "account.models.Contact", "line_number": 64, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.auth.decorators.login_required", "line_number": 60, "usage_type": "name"}]} +{"seq_id": "289905584", "text": "import datetime\n\nfrom django.db.models.query import QuerySet\nfrom django.utils import simplejson as json\n\n\nclass ModelEncoder(json.JSONEncoder):\n def default(self, obj):\n # Deal with data types that don't serialize into JSON naturally\n if isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n\n return json.JSONEncoder.default(self, obj)\n\n\nclass Serializer(object):\n # TODO: many_to_many_fields should be renamed to related_fields\n def serialize(self, queryset, fields=[], many_to_many_fields=[],\n extra_data={}, deep=False, page=None, per_page=10):\n\n self.prepare_for_serialization(\n queryset,\n fields=fields,\n many_to_many_fields=many_to_many_fields,\n extra_data=extra_data,\n deep=deep,\n page=page,\n per_page=per_page\n )\n json_data = self.encode_to_json()\n\n return json_data\n\n # Convert queryset into a serializable Python data structure\n def prepare_for_serialization(self, queryset, fields=[],\n many_to_many_fields=[], 
extra_data={}, deep=False, page=None,\n per_page=10):\n if not isinstance(queryset, QuerySet):\n raise TypeError('QuerySet object expected as first argument, got \\\n %s instead.' % queryset.__class__.__name__)\n if not isinstance(fields, (tuple, list)):\n raise TypeError('Tuple or list expected as second argument, got \\\n %s instead.' % fields.__class__.__name__)\n\n if not isinstance(many_to_many_fields, (tuple, list)):\n raise TypeError('Tuple or list expected as third argument, got %s \\\n instead.' % many_to_many_fields.__class__.__name__)\n if not isinstance(extra_data, dict):\n raise TypeError('Dictionary expected as fourth argument, got %s \\\n instead.' % extra_data.__class__.__name__)\n\n if deep:\n queryset = queryset.prefetch_related(*many_to_many_fields)\n\n self.data = list(queryset.values(*fields))\n\n if page is not None:\n self.paginate_data(page, per_page)\n\n for datum in self.data:\n # Serialize many-to-many fields\n for related_model_name in many_to_many_fields:\n model_instance = queryset.get(id=datum['id'])\n\n # This is indeed a many-to-many field\n m2m_field = getattr(model_instance, related_model_name, None)\n if m2m_field and m2m_field.all():\n if deep:\n target_field_names = \\\n m2m_field.model._meta.get_all_field_names()\n # Only serialize basic fields (avoid relationships)\n # Interesting bug with list item removal within a list\n # comprehension. Using range here to remove list items\n # in reverse order to sidestep the issue.\n for index in range(len(target_field_names)-1, -1, -1):\n field = getattr(\n m2m_field.all()[0],\n target_field_names[index]\n )\n\n # Get rid of complex fields in serialization\n if not isinstance(field, (int, long, unicode,\n datetime.datetime)):\n # Handle file like fields\n try:\n if getattr(field, 'url', None) is None:\n del target_field_names[index]\n except ValueError:\n del target_field_names[index]\n\n value_list = m2m_field.values(*target_field_names)\n datum[related_model_name] = []\n for value in value_list:\n record = {}\n for k, v in value.items():\n record[k] = v\n datum[related_model_name].append(record)\n else:\n value_list = ', '.join([str(x) for x in m2m_field.all()])\n datum[related_model_name] = value_list\n\n # Shove extra data into the payload. Not too useful for serializing\n # querysets of more than one item. 
Should be replaced with deep\n # serialization (see above).\n for key, value in extra_data.items():\n datum[key] = value\n\n def paginate_data(self, page, per_page):\n start_index = ((page-1) * per_page)\n stop_index = start_index + per_page\n self.data = self.data[start_index:stop_index]\n\n def encode_to_json(self):\n model_encoder = ModelEncoder()\n if len(self.data) == 1:\n return model_encoder.encode(self.data[0])\n\n return model_encoder.encode(self.data)\n", "sub_path": "src/api/crashbomb/utils/serialization.py", "file_name": "serialization.py", "file_ext": "py", "file_size_in_byte": 5044, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.utils.simplejson.JSONEncoder", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.utils.simplejson", "line_number": 7, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 10, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 10, "usage_type": "attribute"}, {"api_name": "django.utils.simplejson.JSONEncoder.default", "line_number": 13, "usage_type": "call"}, {"api_name": "django.utils.simplejson.JSONEncoder", "line_number": 13, "usage_type": "attribute"}, {"api_name": "django.utils.simplejson", "line_number": 13, "usage_type": "name"}, {"api_name": "django.db.models.query.QuerySet", "line_number": 38, "usage_type": "argument"}, {"api_name": "datetime.datetime", "line_number": 83, "usage_type": "attribute"}]} +{"seq_id": "99922599", "text": "\"\"\"\nOffline data analysis and visualization tool for Xray Absorption Spectroscopy\n(XAS) experiments at SCS, European XFEL.\n\nAuthor: Jun Zhu \nCopyright (C) European X-Ray Free-Electron Laser Facility GmbH.\nAll rights reserved.\n\"\"\"\nimport abc\nfrom collections import OrderedDict\nimport datetime\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.linear_model import LinearRegression\n\nfrom karabo_data import RunDirectory\n\n\ndef find_peaks(trace, n_peaks, peak_start, peak_width, \n background_end, background_width, peak_interval):\n \"\"\"Return a list of peaks.\n \n :param xarray trace: trace for all the trains. shape = (trains, samples)\n :param int n_peaks: number of expected peaks.\n :param int peak_start: start position of the first peak.\n :param int peak_width: width of a peak.\n :param int background_end: end position of the background for the \n first peak.\n :param int background_width: width of a background.\n :param int peak_interval: gap between peaks.\n\n :return list peaks: a list of peak data in 2D numpy.ndarray with\n shape = (trains, samples).\n :return list backgrounds: a list of background data in 2D numpy.ndarray\n with shape = (trains, samples).\n \"\"\"\n peaks = []\n backgrounds = []\n peak0 = peak_start\n bkg0 = background_end - background_width\n for i in range(n_peaks):\n peaks.append(trace[:, peak0:peak0 + peak_width])\n backgrounds.append(trace[:, bkg0:bkg0 + background_width])\n peak0 += peak_interval\n bkg0 += peak_interval\n\n return peaks, backgrounds \n\n\ndef compute_absorption_sigma(mu1, sigma1, mu2, sigma2, corr):\n \"\"\"Compute the standard deviation for absorption.\n \n :param float/Series mu1: dataset 1 mean.\n :param float/Series sigma1: dataset 1 standard deviation. 
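A hedged usage sketch for the find_peaks helper defined above; the trace shape and window parameters below are made-up values, and the function itself is assumed to be in scope:

import numpy as np

trace = np.arange(80, dtype=float).reshape(2, 40)  # 2 trains x 40 samples
peaks, backgrounds = find_peaks(trace, n_peaks=2, peak_start=4, peak_width=3,
                                background_end=4, background_width=2,
                                peak_interval=10)
# peak i is the window trace[:, 4 + 10*i : 7 + 10*i], one row per train
assert peaks[0].shape == (2, 3) and backgrounds[1].shape == (2, 2)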
\n :param float/Series mu2: dataset 2 mean.\n :param float/Series sigma2: dataset 2 standard deviation.\n :param float/Series corr: correlation between dataset 1 and 2.\n\n :return float/Series: standard deviation of -log(mu2/mu1).\n \"\"\"\n if mu1 == 0 or mu2 == 0:\n raise ValueError(\"mu1 and mu2 cannot be zero!\")\n\n return np.sqrt((sigma1 / mu1) ** 2 + (sigma2 / mu2) ** 2 \n - 2 * corr * sigma1 * sigma2 / (mu1 * mu2))\n\n\ndef compute_absorption(I0, I1):\n \"\"\"Compute absorption.\n\n A = -log(I1/I0)\n\n :param float/numpy.ndarray I0: incident intensity.\n :param float/numpy.ndarray I1: transmitted intensity.\n\n :return float/numpy.ndarray: absorption.\n \"\"\"\n return -np.log(I1 / I0)\n\n\nclass XasAnalyzer(abc.ABC):\n \"\"\"Abstract class for Xray Absorption Spectroscopy analysis.\"\"\"\n\n def __init__(self, run_folder):\n \"\"\"Initialization.\n \n :param str run_folder: full path of the run folder.\n \"\"\"\n self._run = RunDirectory(run_folder)\n \n self._sources = {\n 'MONO': 'SA3_XTD10_MONO/MDL/PHOTON_ENERGY',\n 'XGM': 'SCS_BLU_XGM/XGM/DOOCS',\n 'XGM_OUTPUT': 'SCS_BLU_XGM/XGM/DOOCS:output',\n 'SA3_XGM': 'SA3_XTD10_XGM/XGM/DOOCS',\n 'SA3_XGM_OUTPUT': 'SA3_XTD10_XGM/XGM/DOOCS:output'\n }\n\n # get the DataFrame for XGM control data\n self._xgm_df = self._run.get_dataframe(\n fields=[(self._sources['XGM'], '*value')])\n self._xgm_df.rename(columns=lambda x: x.split('/')[-1], inplace=True)\n self._sa3_xgm_df = self._run.get_dataframe(\n fields=[(self._sources['SA3_XGM'], '*value')])\n self._sa3_xgm_df.rename(columns=lambda x: x.split('/')[-1],\n inplace=True)\n \n # get the DataFrame for SoftMono control data\n self._mono_df = self._run.get_dataframe(\n fields=[(self._sources['MONO'], '*value')])\n self._mono_df.rename(columns=lambda x: x.split('/')[-1], inplace=True)\n\n self._photon_energies = None # photon energies for each pulse\n self._I0 = None \n self._I1 = OrderedDict()\n\n self._data = None # pulse-resolved data in DataFrame\n\n def info(self):\n \"\"\"Print out information of the run(s).\"\"\"\n first_train = self._run.train_ids[0]\n last_train = self._run.train_ids[-1]\n train_count = len(self._run.train_ids)\n span_sec = (last_train - first_train) / 10\n span_txt = str(datetime.timedelta(seconds=span_sec))\n photon_energies = self._mono_df['actualEnergy']\n\n print('# of trains: ', train_count)\n print('Duration: ', span_txt)\n print('First train ID: ', first_train)\n print('Last train ID: ', last_train)\n print('Min photon energy: ', round(photon_energies.min(), 4), 'eV')\n print('Max photon energy: ', round(photon_energies.max(), 4), 'eV')\n\n print('MCP channels:')\n for ch, value in self._channels.items():\n print(' - {}: {}'.format(ch, value['raw']))\n\n def _check_sources(self):\n \"\"\"Check that all the required sources are in the data.\"\"\"\n sources = self._run.all_sources\n for src in self._sources.values():\n if src not in sources:\n raise ValueError(\"Source not found: {}!\".format(src))\n\n def plot_xgm_run(self, *, figsize=(8, 5.6)):\n \"\"\"Plot the train-resolved data from XGM.\n\n :param tuple figsize: figure size.\n \"\"\"\n import matplotlib.pyplot as plt\n plt.rcParams['font.size'] = 12\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize)\n ax1_tw = ax1.twinx()\n\n ln1 = ax1.plot(self._xgm_df['pulseEnergy.photonFlux'],\n label=r\"Pulse energy ($\\mu$J)\")\n # \"nummberOfBrunches\" is indeed the name in the Karabo Device\n # implementation. 
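A quick numeric check of the two helpers above (the values are arbitrary): with corr = 0, the relative errors of the two signals add in quadrature, matching the formula in compute_absorption_sigma.

import numpy as np

mu1, sigma1 = 100.0, 5.0   # I0 with a 5% relative error
mu2, sigma2 = 50.0, 2.5    # I1 with a 5% relative error
A = -np.log(mu2 / mu1)     # what compute_absorption(mu1, mu2) returns
sigma_A = np.sqrt((sigma1 / mu1) ** 2 + (sigma2 / mu2) ** 2)  # corr = 0 case
assert abs(A - np.log(2)) < 1e-12
assert abs(sigma_A - np.sqrt(2) * 0.05) < 1e-12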
For more details, please check\n # https://git.xfel.eu/gitlab/karaboDevices/xgmDoocs\n number_of_bunches = self._xgm_df['pulseEnergy.nummberOfBrunches']\n ln2 = ax1_tw.plot(number_of_bunches, label=\"Number of pulses\", c='g')\n\n lns = ln1 + ln2\n labels = [l.get_label() for l in lns]\n ax1.legend(lns, labels)\n ax1.set_ylabel(r\"Pulse energy ($\\mu$J)\")\n ax1_tw.set_ylabel(\"Number of pulses\")\n if number_of_bunches.max() - number_of_bunches.min() < 5:\n mean_n_bunches = int(number_of_bunches.mean())\n ax1_tw.set_ylim((mean_n_bunches - 4.5, mean_n_bunches + 4.5))\n\n ax2.plot(1000 * self._xgm_df['beamPosition.ixPos'], label=\"x\")\n ax2.plot(1000 * self._xgm_df['beamPosition.iyPos'], label=\"y\")\n\n ax2.set_xlabel(\"Train ID\")\n ax2.set_ylabel(r\"Beam position ($\\mu$m)\")\n ax2.legend()\n fig.tight_layout()\n\n return fig, (ax1, ax1_tw, ax2)\n\n def plot_xgm_train(self, *, index=0, train_id=None, figsize=(8, 5.6)):\n \"\"\"Plot xgm measurement in a given train.\n \n :param int index: train index. Ignored if train_id is given.\n :param int train_id: train ID.\n :param tuple figsize: figure size.\n \"\"\"\n import matplotlib.pyplot as plt\n plt.rcParams['font.size'] = 12\n\n key = \"data.intensityTD\"\n filtered = self._run.select(\"*XGM/*\", key)\n if train_id is None:\n tid, data = filtered.train_from_index(index)\n else:\n tid, data = filtered.train_from_id(train_id)\n\n fig, (ax1, ax2) = plt.subplots(2, 1, figsize=figsize)\n\n ax1.plot(data[self._sources['SA3_XGM_OUTPUT']][key], marker='.')\n ax2.plot(data[self._sources['XGM_OUTPUT']][key], marker='.')\n for ax in (ax1, ax2):\n ax.set_ylabel(r\"Pulse energy ($\\mu$J)\")\n ax.set_xlim((-0.5, 100.5))\n\n ax1.set_title(\"SA3 XGM\")\n ax2.set_title(\"SCS XGM\")\n ax2.set_xlabel(\"Pulse ID\")\n fig.suptitle(\"Train ID: {}\".format(tid))\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n return fig, (ax1, ax2)\n\n @abc.abstractmethod\n def process(self, *args, **kwargs):\n \"\"\"Process the run data.\n\n :return: the current instance.\n \"\"\"\n pass\n\n def select(self, keys, lower=-np.inf, upper=np.inf):\n \"\"\"Select data within the given boundaries.\n\n It modifies the internal data inplace.\n\n :param str/list/tuple/numpy.ndarray: key(s) for applying the filter.\n :param float lower: lower boundary (included).\n :param float upper: higher boundary (included).\n\n :return: the current instance.\n \"\"\"\n n0 = len(self._data)\n if isinstance(keys, (list, tuple, np.ndarray)):\n # TODO: remove this for loop\n for key in keys:\n self._data.query(\"{} <= {} <= {}\".format(lower, key, upper),\n inplace=True)\n else:\n self._data.query(\"{} <= {} <= {}\".format(lower, keys, upper),\n inplace=True)\n\n print(\"{} out of {} data are selected!\".format(len(self._data), n0))\n return self\n\n @property\n @abc.abstractmethod\n def data(self):\n \"\"\"Get the pulse-resolved data in pandas.DataFrame.\"\"\"\n pass\n\n @abc.abstractmethod\n def compute_total_absorption(self):\n \"\"\"Compute absorption for all data.\"\"\"\n pass\n\n @abc.abstractmethod\n def compute_spectrum(self, n_bins=20, point_wise=False):\n \"\"\"Compute spectrum.\n\n :param int n_bins: number of energy bins.\n :param bool point_wise: if True, calculate the absorption point wise\n and then average. Otherwise, average over I0 and I1 first and\n then calculate the absorption. 
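A minimal sketch of the chained-interval filter that select() builds with str.format; the frame and column name below are illustrative only:

import pandas as pd

df = pd.DataFrame({'XGM': [0.5, 2.0, 3.5], 'MCP1': [1.0, 1.2, 0.1]})
df.query("1.0 <= XGM <= 3.0", inplace=True)  # same string form select() produces
assert df['XGM'].tolist() == [2.0]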
Default = False\n \"\"\"\n pass\n\n @abc.abstractmethod\n def plot_correlation(self, *args, **kwargs):\n \"\"\"Generate correlation plots.\"\"\"\n pass\n\n @abc.abstractmethod\n def plot_spectrum(self, *args, **kwargs):\n \"\"\"Generate spectrum plots.\"\"\"\n\n\nclass XasTim(XasAnalyzer):\n def __init__(self, *args, channels=('D', 'C', 'B', 'A'), \n pulse_separation=880e-9, interleaved_mode=False, **kwargs):\n \"\"\"Initialization.\n \n :param tuple channels: names of AdqDigitizer channels which \n connects to MCP1 to MCP4.\n :param float pulse_separation: pulse separation in a train, in s.\n :param bool interleaved_mode: the resolution is improved by a factor\n of two in the interleaved mode. Default = False.\n \"\"\"\n super().__init__(*args, **kwargs)\n\n self._sources.update({\n 'DIGITIZER': 'SCS_UTC1_ADQ/ADC/1',\n 'DIGITIZER_OUTPUT': 'SCS_UTC1_ADQ/ADC/1:network'\n })\n\n self._front_channels = ('MCP1', 'MCP2', 'MCP3')\n self._back_channels = ('MCP4',)\n\n self._channels = {\n 'MCP{}'.format(i):\n {'raw': \"digitizers.channel_1_{}.raw.samples\".format(ch),\n 'apd': \"digitizers.channel_1_{}.apd.pulseIntegral\".format(ch)}\n for i, ch in enumerate(channels, 1)}\n\n self._check_sources()\n\n self._resolution = 0.5e-9 # digitizer resolution, in s\n if interleaved_mode:\n self._resolution = 0.25e-9\n self._peak_interval = pulse_separation / self._resolution\n \n for ch in self._channels:\n self._I1[ch] = None \n\n def plot_digitizer_train(self, *, index=0, train_id=None,\n figsize=(8, 11.2), x_min=None, x_max=None):\n \"\"\"Plot digitizer signals in a given train.\n\n :param int index: train index. Ignored if train_id is given.\n :param int train_id: train ID.\n :param tuple figsize: figure size.\n :param int x_min: minimum sample ID.\n :param int x_max: maximum sample ID.\n \"\"\"\n if train_id is None:\n tid, data = self._run.train_from_index(index)\n else:\n tid, data = self._run.train_from_id(train_id)\n\n digitizer_raw_data = {\n ch: data[self._sources['DIGITIZER_OUTPUT']][value['raw']]\n for ch, value in self._channels.items()\n }\n\n n_channels = len(self._channels)\n\n import matplotlib.pyplot as plt\n plt.rcParams['font.size'] = 12\n\n fig, axes = plt.subplots(n_channels, 1, figsize=figsize)\n\n for ax, (key, value) in zip(axes, digitizer_raw_data.items()):\n ax.plot(value)\n ax.set_title(key.upper())\n ax.set_ylabel(\"Intensity (arb.)\")\n ax.set_xlim((x_min, x_max))\n ax.set_xlabel(\"Samples\")\n \n fig.suptitle(\"Train ID: {}\".format(tid))\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n\n return fig, axes\n\n def _integrate_channel(self, channel_id, n_pulses, *args): \n \"\"\"Integrate the peaks in a given channel for all trains.\n\n The background value is subtracted.\n \n :param str channel_id: full name of the output channel.\n :param int n_pulses: number of pulses in a train.\n\n :return numpy.ndarray: 1D array holding integration result for\n each train.\n \"\"\"\n trace = self._run.get_array(self._sources['DIGITIZER_OUTPUT'],\n channel_id)\n\n peaks, backgrounds = find_peaks(trace, n_pulses, *args)\n\n ret = []\n for peak, background in zip(peaks, backgrounds):\n bkg = background.median(axis=-1)\n # the broadcast only works with xarray\n ret.append(np.trapz(peak - bkg, axis=-1)) \n\n return np.ravel(ret, order=\"F\") \n\n def process(self, n_pulses, pulse_id0=0, *, use_apd=True,\n peak_start=None, peak_width=None, \n background_end=None, background_width=None):\n \"\"\"Process the run data.\n \n :param int n_pulses: number of pulses in a train.\n :param int pulse_id0: 
first pulse ID. Default = 0.\n :param bool use_apd: use the integration calculated from the \n hardware.\n :param int peak_start: start position of the first peak. Ignored if\n use_apd == True.\n :param int peak_width: width of a peak. Ignored if use_apd == True.\n :param int background_end: end position of the background for the \n first peak. Ignored if use_apd == True.\n :param int background_width: width of a background. Ignored if \n use_apd == True.\n\n :return: the current instance.\n \"\"\"\n # self._I0 is a numpy.ndarray\n self._I0 = self._run.get_array(\n self._sources['XGM_OUTPUT'], 'data.intensityTD').values[...,\n pulse_id0:pulse_id0 + n_pulses].flatten()\n\n for ch, value in self._channels.items():\n if use_apd:\n integrals = self._run.get_array(\n self._sources['DIGITIZER_OUTPUT'], value['apd']).values[\n ..., :n_pulses]\n self._I1[ch] = -np.ravel(integrals)\n else:\n # self._I1[ch] is a list of numpy.ndarray\n # Note: the sign of I1 is reversed here!!!\n self._I1[ch] = -self._integrate_channel(\n value['raw'], n_pulses, peak_start, peak_width, \n background_end, background_width, int(self._peak_interval))\n\n self._photon_energies = np.repeat(\n self._mono_df['actualEnergy'], n_pulses)\n \n data = {'energy': self._photon_energies, \"XGM\": self._I0}\n data.update({ch: self._I1[ch] for ch in self._channels})\n \n self._data = pd.DataFrame(data)\n\n return self\n\n @property\n def data(self):\n \"\"\"Get the pulse-resolved data in pandas.DataFrame.\n\n :return: pulse-resolved data in pandas.DataFrame with index being\n the train ID and columns being:\n - XGM: intensity from XGM;\n - MCP1, MCP2, MCP3, MCP4: intensity from MCPs. The sign of data\n is reversed;\n - energy: photon energy\n \"\"\"\n if self._data is None:\n raise ValueError(\"You need to call the method 'process' first.\")\n return self._data \n\n def compute_total_absorption(self):\n \"\"\"Compute absorption for all data.\n\n :return: total absorption data in pandas.DataFrame with index being\n the MCP channel name and columns being:\n - muA: absorption mean;\n - sigmaA: absorption standard deviation;\n - muI0: I0 mean;\n - sigmaI0: I0 standard deviation;\n - weight: sum of I0 values;\n - muI1: I1 mean;\n - sigmaI1: I1 standard deviation;\n - corr: correlation coefficient between I0 and I1;\n - count: number of data.\n \"\"\"\n absorption = pd.DataFrame(\n columns=['muA', 'sigmaA', 'muI0', 'sigmaI0', 'weight',\n 'muI1', 'sigmaI1', 'corr', 'count']\n )\n\n I0 = self._data['XGM']\n\n muI0 = I0.mean()\n sigmaI0 = I0.std()\n weight = I0.sum()\n count = I0.size\n\n for ch in self._front_channels:\n I1 = self._data[ch]\n\n muI1 = I1.mean()\n sigmaI1 = I1.std()\n\n corr = np.corrcoef(I1, I0)[0, 1]\n absorption.loc[ch] = (\n compute_absorption(muI0, muI1),\n compute_absorption_sigma(muI0, sigmaI0, muI1, sigmaI1, corr),\n muI0, sigmaI0, weight, muI1, sigmaI1, corr, count\n )\n\n return absorption\n\n def compute_spectrum(self, n_bins=20, point_wise=False):\n \"\"\"Compute spectrum.\n\n :param int n_bins: number of energy bins.\n :param bool point_wise: if True, calculate the absorption point wise\n and then average. Otherwise, average over I0 and I1 first and\n then calculate the absorption. 
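A hedged sketch of why _integrate_channel ravels with order="F": each list entry holds one peak integrated across all trains, and the column-major ravel interleaves them back into train-major, pulse-minor order, matching the flattened XGM intensities built in process().

import numpy as np

per_peak = [np.array([10., 11.]), np.array([20., 21.])]  # 2 peaks x 2 trains
flat = np.ravel(per_peak, order="F")
# train 0 contributes pulses (10, 20), then train 1 contributes (11, 21)
assert flat.tolist() == [10.0, 20.0, 11.0, 21.0]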
Default = False\n\n :return: spectrum data in pandas.DataFrame with index being the\n energy bin range and columns being:\n - energy: central energy of each bin;\n - count: number of data points for each energy bin;\n - muXGM, muMCP1, muMCP2, muMCP3: intensity mean;\n - sigmaXGM, sigmaMCP1, sigmaMCP2, sigmaMCP3: intensity standard\n deviations;\n - muA1, muA2, muA3: absorption mean;\n - sigmaA1, sigmaA2, sigmaA3: absorption standard deviation;\n - corrMCP1, corrMCP2, corrMCP3: correlation between MCP and XGM.\n \"\"\"\n # binning\n binned = self._data.groupby(pd.cut(self._data['energy'], bins=n_bins))\n\n if not point_wise:\n # mean\n\n binned_mean = binned.mean()\n # rename columns, e.g. 'A' -> 'muA'\n binned_mean.columns = ['mu' + col if col != 'energy' else col\n for col in binned_mean.columns]\n # standard deviation\n\n binned_std = binned.std()\n # we use the \"energy\" column in binned_mean\n binned_std.drop(\"energy\", axis=1, inplace=True)\n binned_std.columns = ['sigma' + col for col in binned_std.columns]\n\n # correlation\n\n # calculate the correlation between 'XGM' and all the 'MCP'\n # columns for each group.\n binned_corr = binned.corr().loc[pd.IndexSlice[:, 'XGM'], :].drop(\n columns=['XGM', 'energy'], axis=1).reset_index(\n level=1, drop=True)\n binned_corr.columns = ['corr' + col for col in binned_corr.columns]\n\n # combine all the above results\n\n spectrum = pd.concat(\n [binned_mean, binned_std, binned_corr], axis=1)\n spectrum['count'] = binned['energy'].count()\n\n # calculate absorption and its sigma for each bin\n for i, ch in enumerate(self._front_channels, 1):\n spectrum['muA{}'.format(i)] = spectrum.apply(\n lambda x: compute_absorption(x['muXGM'], x['mu' + ch]), axis=1)\n spectrum['sigmaA{}'.format(i)] = spectrum.apply(\n lambda x: compute_absorption_sigma(x['muXGM'],\n x['sigmaXGM'],\n x['mu' + ch],\n x['sigma' + ch],\n x['corr' + ch]), axis=1)\n else:\n raise NotImplementedError(\"point_wise=True is not implemented.\")\n\n return spectrum\n\n def plot_correlation(self, channel=None, *, figsize=(8, 6), ms=6,\n alpha=0.05, n_bins=20):\n \"\"\"Generate correlation plots.\n\n :param str channel: MCP channel name, e.g. 
MCP1, for visualizing\n a single channel with four plots, or None (default) for\n visualizing all the channels with one plot each.\n Case insensitive.\n :param tuple figsize: figure size.\n :param int ms: marker size for the scatter plots.\n :param float alpha: transparency for the scatter plots.\n :param int n_bins: number of bins for the histogram plots.\n \"\"\"\n import matplotlib.pyplot as plt\n plt.rcParams['font.size'] = 12\n \n absorption = self.compute_total_absorption()\n\n fig, axes = plt.subplots(2, 2, figsize=figsize)\n\n I0 = self._data['XGM']\n if channel is None:\n for ax, ch in zip(axes.flatten(), self._channels):\n I1 = self._data[ch]\n ax.scatter(I0, I1, s=ms, alpha=alpha, label=None)\n reg = LinearRegression().fit(I0.values.reshape(-1, 1), I1)\n\n if ch in self._back_channels:\n label = None \n else:\n muA = absorption.loc[ch, \"muA\"]\n sigmaA = absorption.loc[ch, \"sigmaA\"]\n label = \"Abs: {:.3g} +/- {:.3g}\".format(muA, sigmaA) \n\n ax.plot(I0, reg.predict(I0.values.reshape(-1, 1)), \n c='#FF8000', lw=2, label=label)\n \n ax.set_xlabel(\"$I_0$\")\n ax.set_ylabel(\"$I_1$\")\n ax.set_title(ch)\n if ch not in self._back_channels:\n ax.legend()\n\n fig.tight_layout()\n elif channel.upper() in self._channels:\n ch = channel.upper()\n I1 = self._data[ch]\n axes[1][0].scatter(I0, I1, s=ms, alpha=alpha, label=None)\n reg = LinearRegression().fit(I0.values.reshape(-1, 1), I1)\n \n if ch in self._back_channels:\n label = None\n else:\n muA = absorption.loc[ch, \"muA\"]\n sigmaA = absorption.loc[ch, \"sigmaA\"]\n label = \"Abs: {:.3g} +/- {:.3g}\".format(muA, sigmaA) \n\n axes[1][0].plot(I0, reg.predict(I0.values.reshape(-1, 1)), \n c='#FF8000', lw=2, label=label)\n\n axes[1][0].set_xlabel(\"$I_0$\")\n axes[1][0].set_ylabel(\"$I_1$\")\n if ch not in self._back_channels:\n axes[1][0].legend()\n\n axes[0][0].hist(I0, bins=n_bins)\n axes[0][0].axvline(I0.mean(), c='#6A0888', ls='--')\n\n axes[1][1].hist(self._I1[ch],\n bins=n_bins, orientation='horizontal')\n axes[1][1].axhline(I1.mean(), c='#6A0888', ls='--')\n\n with np.errstate(divide='ignore', invalid='ignore'):\n absp = compute_absorption(I0, I1)\n axes[0][1].scatter(I0, absp, s=ms, alpha=alpha)\n axes[0][1].set_xlabel(\"$I_0$\")\n axes[0][1].set_ylabel(\"$-log(I_1/I_0)$\")\n if ch not in self._back_channels:\n axes[0][1].axhline(\n absorption.loc[ch, 'muA'], c='#FF8000', ls='--',\n label=\"SNR@labs: {:.3g}\".format(1. / sigmaA),\n )\n axes[0][1].legend()\n \n fig.suptitle(ch)\n fig.tight_layout(rect=[0, 0.03, 1, 0.95])\n else:\n raise ValueError(\"Not understandable input!\")\n \n return fig, axes\n\n def plot_spectrum(self, channel=None, *, figsize=(6, 4.5), capsize=4, \n n_bins=20, use_transmission=False):\n \"\"\"Generate spectrum plots.\n\n :param str channel: MCP channel name, e.g. MCP1, for visualizing\n a single channel, or None (default) for visualizing MCP1-3 \n altogether. Case insensitive.\n :param tuple figsize: figure size.\n :param int capsize: cap size for the error bar.\n :param int n_bins: number of energy bins.\n :param bool use_transmission: False for plotting energy vs. \n absorption, while True for plotting energy vs. 
I1.\n Default = False.\n \"\"\"\n import matplotlib.pyplot as plt\n plt.rcParams['font.size'] = 12\n\n spectrum = self.compute_spectrum(n_bins=n_bins)\n\n fig, ax = plt.subplots(figsize=figsize)\n\n if channel is None:\n for i, ch in enumerate(self._front_channels, 1):\n if use_transmission:\n y = spectrum['mu' + ch]\n y_err = spectrum['sigma' + ch] / \\\n spectrum['count'].apply(np.sqrt)\n else:\n y = spectrum['muA' + str(i)]\n y_err = spectrum['sigmaA' + str(i)] / \\\n spectrum['count'].apply(np.sqrt)\n\n ax.errorbar(spectrum.energy, y, y_err,\n capsize=capsize, fmt='.', label=ch)\n\n elif channel.upper() in self._front_channels:\n ch = channel.upper()\n idx = list(self._channels.keys()).index(ch) + 1\n\n if use_transmission:\n y = spectrum['mu' + ch]\n y_err = spectrum['sigma' + ch] / \\\n spectrum['count'].apply(np.sqrt)\n else:\n y = spectrum['muA{}'.format(idx)]\n y_err = spectrum['sigmaA{}'.format(idx)] / \\\n spectrum['count'].apply(np.sqrt)\n\n ax.errorbar(spectrum.energy, y, y_err,\n fmt='.', capsize=capsize, label=ch)\n else:\n raise ValueError(\"Not understandable input!\")\n\n ax.set_xlabel(\"Energy (eV)\")\n if use_transmission:\n ax.set_ylabel(\"I1\")\n else:\n ax.set_ylabel(\"Absorption\")\n\n ax.legend()\n\n fig.tight_layout()\n\n return fig, ax\n", "sub_path": "karaboXAS/xas_analyzer.py", "file_name": "xas_analyzer.py", "file_ext": "py", "file_size_in_byte": 26460, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.sqrt", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.log", "line_number": 79, "usage_type": "call"}, {"api_name": "abc.ABC", "line_number": 82, "usage_type": "attribute"}, {"api_name": "karabo_data.RunDirectory", "line_number": 90, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 116, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 153, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 193, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 193, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 202, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 218, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 226, "usage_type": "attribute"}, {"api_name": "numpy.ndarray", "line_number": 238, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 251, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 256, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 261, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 272, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 277, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 342, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 342, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 344, "usage_type": "call"}, {"api_name": 
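A hedged end-to-end sketch of the workflow the class above supports; the run path, pulse count, and selection bounds are placeholders, not values from a real experiment:

xas = XasTim('/path/to/run_folder', channels=('D', 'C', 'B', 'A'))
xas.info()
# process() and select() both return the instance, so calls can be chained
xas.process(n_pulses=42, use_apd=True).select('XGM', lower=0.1, upper=5000)
spectrum = xas.compute_spectrum(n_bins=20)
fig, ax = xas.plot_spectrum()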
"matplotlib.pyplot", "line_number": 344, "usage_type": "name"}, {"api_name": "numpy.trapz", "line_number": 378, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 380, "usage_type": "call"}, {"api_name": "numpy.ravel", "line_number": 411, "usage_type": "call"}, {"api_name": "numpy.repeat", "line_number": 419, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 425, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.corrcoef", "line_number": 477, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 506, "usage_type": "call"}, {"api_name": "pandas.IndexSlice", "line_number": 526, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 533, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 566, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 566, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 570, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 570, "usage_type": "name"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 577, "usage_type": "call"}, {"api_name": "sklearn.linear_model.LinearRegression", "line_number": 600, "usage_type": "call"}, {"api_name": "numpy.errstate", "line_number": 624, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 658, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 658, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 662, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 662, "usage_type": "name"}, {"api_name": "numpy.sqrt", "line_number": 669, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 673, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 685, "usage_type": "attribute"}, {"api_name": "numpy.sqrt", "line_number": 689, "usage_type": "attribute"}]} +{"seq_id": "285676864", "text": "import xml.dom.minidom as dom, json\nfrom com.pajk.plazass.utils import DictUtil,CommonUtils,RegistryDao, ValidateUtil,AllHttpClientWriter,RocketMQWriter,DateTimeUtil,LogUtil,ScheduleDao,TairUtil,PatientCardDao\n# import_script 武汉大学人民医院/Abstract.py\n## 武汉大学人民医院 确认支付\nclass PayRegistry(Abstract):\n\n def getRequestParams(self, message):\n ''' 获取请求参数 '''\n reqData = json.loads(message[\"content\"])\n hospitalId = message[\"from\"]\n result = {}\n result[\"url\"] = DictUtil.getDictValue(hospitalId, \"serviceUrl\") + \"Sendbookclinic\"\n\n registryId = reqData.get(\"registryNo\")\n registryNo = RegistryDao.getRegistryNo(int(registryId))\n\n\n params = {}\n params[\"AppointmentId\"] = registryNo\n params[\"CreateTime\"] = TairUtil.getData(hospitalId + \"_\" + registryNo)\n params[\"PayPrice\"] = reqData.get(\"payAmount\") * 100 if reqData.get(\"payAmount\") else 0\n params[\"Status\"] = \"0\"\n params[\"PayMode\"] = \"92\"\n params[\"OrderIdPay\"] = reqData.get(\"payInstanceNo\")\n\n self.setCommonParams5(hospitalId, params)\n\n result[\"params\"] = params\n message[\"internalId\"] = registryId\n message[\"content\"] = result\n return message\n\n\n def getResponseParams(self, messageObj):\n content = messageObj[\"content\"]\n contentObj = json.loads(content)\n ReturnCode = int(contentObj[\"ReturnCode\"])\n\n if ReturnCode != 0:\n # 指定下一个引擎消息类型\n messageObj[\"msgType\"] = \"EXIT\"\n messageObj[\"returnCode\"] = \"ThirdError\"\n messageObj[\"returnMsg\"] = 
contentObj[\"Message\"]\n return messageObj\n\n data = self.setBookRespData(messageObj)\n\n messageObj[\"content\"] = data\n messageObj[\"contentType\"] = \"JSON\"\n return messageObj\n\n def setBookRespData(self, messageObj):\n content = messageObj[\"content\"]\n contentObj = json.loads(content)\n outputInfo = contentObj[\"OutputInfo\"]\n hospitalId = messageObj[\"from\"]\n reqData = json.loads(messageObj[\"extendContent\"])\n registry = RegistryDao.getRegistryById(int(reqData.get(\"registryNo\")))\n registryObj = json.loads(registry)\n schedule = ScheduleDao.getSchedule(hospitalId, registryObj.get(\"scheduleId\"))\n scheduleObj = json.loads(schedule)\n resourceNo = registryObj.get(\"resourceNo\")\n resourceNoArr = resourceNo.split(\"_\")\n\n bookRespData = {}\n bookRespData[\"visitTime\"] = registryObj.get(\"visitTime\")\n bookRespData[\"visitNoon\"] = scheduleObj.get(\"visitNoon\")\n bookRespData[\"cancelStopTime\"] = self.getCancelStopTime(hospitalId, registryObj.get(\"visitTime\"))\n if resourceNoArr and len(resourceNoArr) >= 2:\n ClinicType = resourceNoArr[1]\n if ClinicType == \"0\": ##分时段\n resourceId = registryObj.get(\"resourceId\")\n visitTimeArr = resourceId.split(\"_\")\n visitDTimeStart = visitTimeArr[0]\n visitTimeEnd = visitTimeArr[0]\n\n bookRespData[\"registryTime\"] = DateTimeUtil.plus(registryObj.get(\"visitTime\") + \" \" + visitDTimeStart, \"yyyy-MM-dd HH:mm\", -15,\"MINUTES\") # 取号时间\n bookRespData[\"visitTimeStart\"] = visitDTimeStart # 就诊开始时间\n bookRespData[\"visitTimeEnd\"] = visitTimeEnd # 就诊结束时间\n else:\n\n if scheduleObj.get(\"visitNoon\") == \"1\":\n bookRespData[\"registryTime\"] = registryObj.get(\"visitTime\") + \" 11:00\"\n else:\n bookRespData[\"registryTime\"] = registryObj.get(\"visitTime\") + \" 16:30\"\n\n bookRespData[\"hospitalRegistyNo\"] = outputInfo.get(\"AppointmentId\")\n bookRespData[\"hospitalId\"] = messageObj[\"from\"]\n bookRespData[\"patientId\"] = \"\"\n bookRespData[\"registryPassword\"] = \"\"\n bookRespData[\"registryLocation\"] = \"\"\n bookRespData[\"visitNo\"] = \"\"\n bookRespData[\"seeAddress\"] = \"\"\n bookRespData[\"registryNo\"] = messageObj[\"internalId\"] # 内部订单号\n bookRespData[\"status\"] = \"1\" # 1为挂号成功 3为锁号成功\n bookRespData[\"patientCardNo\"] = PatientCardDao.getCardNum(hospitalId, reqData.get(\"registryNo\")) ##就诊卡号\n bookRespData[\"sequenceNo\"] = TairUtil.getData(hospitalId + \"_\" + outputInfo.get(\"AppointmentId\") + \"_clinicSequence\")\n\n return bookRespData\n\n def getCancelStopTime(self, hospitalId, visitDate):\n if not visitDate:\n return \"\"\n time = DictUtil.getDictValue(hospitalId, \"cancelStopTime\")\n days = DictUtil.getDictValue(hospitalId, \"cancelStopDays\")\n date = DateTimeUtil.plusDate(visitDate, \"yyyy-MM-dd\", int(days), \"DAYS\")\n return date + \" \" + time\n\n\n def execute(self,meList):\n jsonList = json.loads(meList)\n message = jsonList[0]\n\n #1 请求参数验证\n errorMsg = ValidateUtil.validatePayParams(message[\"content\"])\n if errorMsg != None:\n message[\"returnCode\"] = \"ArgumentError\"\n message[\"returnMsg\"] = errorMsg\n return self.handleResult(message)\n \n # 2 缓存请求参数, 发送MQ的时候需要用到。\n message[\"extendContent\"] = message[\"content\"]\n\n #3 入参转换\n message = self.getRequestParams(message)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n #4 调用webservice\n messageStr = AllHttpClientWriter().execute(json.dumps(message))\n message = json.loads(messageStr)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n #5 结果mapping\n 
message = self.getResponseParams(message)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n #6 Save to the database\n messageStr = RegistryDao.processPayRegistry(json.dumps(message, ensure_ascii=False))\n message = json.loads(messageStr)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n #7 Send the MQ message\n messageStr = RocketMQWriter().writer(json.dumps(message, ensure_ascii=False))\n message = json.loads(messageStr)\n if message[\"returnCode\"] != \"Success\":\n return self.handleResult(message)\n\n message[\"extendContent\"] = \"\"\n message[\"returnMsg\"] = u\"Payment confirmed successfully\"\n return self.handleResult(message)\n\nresult = PayRegistry().execute(params)\n", "sub_path": "whrm/PayRegistry.py", "file_name": "PayRegistry.py", "file_ext": "py", "file_size_in_byte": 6587, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.loads", "line_number": 9, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DictUtil.getDictValue", "line_number": 12, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DictUtil", "line_number": 12, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.RegistryDao.getRegistryNo", "line_number": 15, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RegistryDao", "line_number": 15, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.TairUtil.getData", "line_number": 20, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.TairUtil", "line_number": 20, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 36, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 57, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RegistryDao.getRegistryById", "line_number": 58, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RegistryDao", "line_number": 58, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 59, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.ScheduleDao.getSchedule", "line_number": 60, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.ScheduleDao", "line_number": 60, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 61, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DateTimeUtil.plus", "line_number": 77, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DateTimeUtil", "line_number": 77, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.PatientCardDao.getCardNum", "line_number": 96, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.PatientCardDao", "line_number": 96, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.TairUtil.getData", "line_number": 97, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.TairUtil", "line_number": 97, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.DictUtil.getDictValue", "line_number": 104, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DictUtil", "line_number": 104, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.DictUtil.getDictValue", "line_number": 105, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DictUtil", "line_number": 105, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.DateTimeUtil.plusDate", "line_number": 106, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.DateTimeUtil", "line_number": 106, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 111, 
"usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.ValidateUtil.validatePayParams", "line_number": 115, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.ValidateUtil", "line_number": 115, "usage_type": "name"}, {"api_name": "com.pajk.plazass.utils.AllHttpClientWriter", "line_number": 130, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 130, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 131, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RegistryDao.processPayRegistry", "line_number": 141, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RegistryDao", "line_number": 141, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 141, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 142, "usage_type": "call"}, {"api_name": "com.pajk.plazass.utils.RocketMQWriter", "line_number": 147, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 147, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 148, "usage_type": "call"}]} +{"seq_id": "569981005", "text": "from kerasClassify import make_dataset, evaluate_mlp_model, get_emails, write_csv, load_df_and_features\nfrom mlClassify import logreg_classifier\nfrom sklearn.dummy import DummyClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.svm import LinearSVC\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.feature_selection import SelectKBest, chi2\nfrom keras.utils import np_utils\n\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.neighbors import KNeighborsClassifier\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom datetime import datetime\nimport time\n\nimport pandas as pd\nfrom hpyutils import MyObj, setattrs, rsetattr\nfrom my_metrics import calc_metrics, print_metrics, plot_metrics\nfrom debug_ml import explain_predictions\n\n\n\ndef init_config():\n \n dataset_info = MyObj()\n \n dataset_info.csvEmailsFilePath = \"./data/enron_6_email_folders_Inboxes_KAMINSKI.tsv\"; # Dataset tsv file path. Each line is an email\n dataset_info.num_runs = 1\n # PreProcessing\n dataset_info.remove_stopwords = True # remove stopwords (english only for now)\n dataset_info.ngram_max = 2 # Max number of word ngrams (1 for unigram, 2 for bigram)\n dataset_info.vocab_size = 10000\n dataset_info.feature_type = 'tfidf' # Type of feature in matrix: binary (0/1), tfidf, count\n dataset_info.use_keras_tokenizer = False\n # Features\n dataset_info.toccDomains = True # Use to and cc email domains as features \n #-- Data \n # dataset_info.new_label_names = ['Save','DontSave'] # random select labels to map to one of the labels in array. 
mutually ex with labels_map\n dataset_info.labels_map = { 'Inbox' : 'DontSave','Notes inbox' : 'DontSave', 'default_mapping' : 'Save' } # manual mapping with default mapping\n #dataset_info.labels_map = { 'Inbox' : 'DontSave','Notes inbox' : 'DontSave', 'default_mapping' : 'Omit', 'Projects': 'Save'} # manual mapping with default mapping\n dataset_info.labels_map_filter_names = ['Omit']# values of labels to filter out in df\n dataset_info.sub_sample_mapped_labels = { 'Save': 650 ,'DontSave' : 650 }\n #dataset_info.class_weight = { 'Save': 6 ,'DontSave' : 1 }\n # dataset_info.new_total_samples = 100\n dataset_info.test_split = 0.1\n \n #save final dataframe to csv file only in case num_runs=1\n dataset_info.save_df = False\n \n #--force papulate cache\n dataset_info.force_papulate_cache = False\n \n \n dataset_info.random_seed = []\n \n ######### Data loading ###########\n dataset_info.load = MyObj()\n setattrs(dataset_info.load,\n # don't load large DF with many cols --> 10GB mem --> very slow runtime + crashes --> instead use cache --> search cached df with required fields for this exp\n required_cols = None, # Array of strings ['emb_w2v_body','emb_glv_body', ...] load from cache (or create cache) for extra fields, in addition to min cols set + text_cols \n min_cols = ['conversationId', 'createdDateTime', 'folderName', 'id', 'inferenceClassification', 'internetMessageId', 're', 'sentDateTime', 'subject',\n 'userId', 'sender', 'content', 'body', 'subj', 'PartId', 'label', 'group', 'to_rcpt', 'cc_rcpt']\n )\n \n ######### Preprocessing ###########\n dataset_info.preprocess = MyObj()\n setattrs(dataset_info.preprocess,\n text_cols = [ 'subject', 'content', 'to','cc'], # , 'people_format' # Important: Not used in old get_ngrams_data (.tsv)\n modifyFeatureVector = None, # accept (dataset_info.ds.df,dataset_info) and return a new df, modifying df['feature'] using Embeddings, KB \n select_best = 4000, # Number of features to keep in feature selection (disable if working )\n use_filtered = True,\n filtered_prefix = 'filt_', \n )\n \n ########################################### Training #####################################################\n dataset_info.train = MyObj()\n setattrs(dataset_info.train,\n classifier_func = logreg_classifier # evaluate_mlp_model # Default is Keras:\n )\n \n #-- NN Arch\n dataset_info.train.nn = MyObj()\n setattrs(dataset_info.train.nn,\n num_hidden = 512,\n dropout = 0.5,\n )\n \n ########################################### Metrics #####################################################\n dataset_info.metrics = MyObj()\n setattrs(dataset_info.metrics,\n fpr_thresh = 0.1, # Requires max fpr of 0.1 --> calc class proba threshold for binary classification \n report_metrics=['sel_tpr','sel_fpr','roc_auc', 'accuracy','precision','recall','f_score'], # Specify metrics from new_metrics to report (see metrics names in my_metrics.py) \n # testgroupby = 'sender' or 'to' # Report accuracy (correct predictions), by number of training samples per group (groupby sender test samples, for each group get its training samples)\n )\n \n ########################################### Hooks #####################################################\n dataset_info.hooks = MyObj()\n setattrs(dataset_info.hooks,\n afterFeatures = None, # function that is called after tokenization + feature extraction, before make_dataset (train/test split and subsample)\n )\n \n ######################## End Enron derived datasets experiments ##########################################\n if dataset_info.num_runs > 1 and 
dataset_info.save_df:\n raise Exception(\"Cannot use both save_df and num_runs > 1\")\n \n ######################## Params validation and computed params ##########################################\n if getattr(dataset_info,'labels_map',False) :\n if getattr(dataset_info, 'new_label_names', False):\n raise Exception(\"Cannot use both new_label_names and labels_map\")\n # Create new_label_names form labels_map unique values\n dataset_info.new_label_names = [i for i in list(set(dataset_info.labels_map.values())) if i not in getattr(dataset_info, \"labels_map_filter_names\", [])]\n \n if not dataset_info.load.required_cols and dataset_info.preprocess.modifyFeatureVector:\n print('Warning: override modifyFeatureVector - make sure all columns used in the function are specified at dataset_info.load.required_cols - or KeyError occurs')\n \n return dataset_info\n\ndataset_info = init_config()\n\nclass Dataset():\n def __init__(self):\n pass\n\n # sorted - get df sorted by column \"index_row\"\n def get_df(self, sorted=True):\n df = self.df\n if hasattr(df, \"label_filter_out\"):\n df = df[~df[getattr(self, \"filer_col_name\", \"label_filter_out\")] == True]\n if sorted and hasattr(self.df, 'index_row'):\n df = df.sort_values(by=['index_row'])\n return df\n\n def get_X_train(self):\n return self.df[(~self.df[getattr(self, 'train_col_name', 'train')].isnull()) &\n (~self.df[getattr(self, \"filer_col_name\", \"label_filter_out\")] == True)].sort_values(by=['index_row'])\n\n def get_X_test(self):\n return self.df[(~self.df['test'].isnull()) & (~self.df[getattr(self, \"filer_col_name\", \"label_filter_out\")] == True)].sort_values(by=['index_row'])\n\n def get_Y_train(self, X_train, to_categorical=False, num_labels=0):\n Y_train = X_train[getattr(self, 'label_col_name', 'label_num')].tolist()\n if (to_categorical):\n return np_utils.to_categorical(Y_train, num_labels)\n return Y_train\n\n def get_Y_test(self, X_test, to_categorical=False, num_labels=0):\n Y_test = X_test[getattr(self, 'label_col_name', 'label_num')].tolist()\n if (to_categorical):\n return np_utils.to_categorical(Y_test, num_labels)\n return Y_test\n\n def get_dataset(self, to_categorical=False, num_labels=0):\n X_train = self.get_X_train()\n X_test = self.get_X_test()\n Y_train = self.get_Y_train(X_train, to_categorical=to_categorical, num_labels=num_labels)\n Y_test = self.get_Y_test(X_test, to_categorical=to_categorical, num_labels=num_labels)\n X_train_features = np.array(X_train['features'].tolist()) \n X_test_features = np.array(X_test['features'].tolist())\n if hasattr(self, 'selected_features_idxs'):\n X_train_features = X_train_features[:, self.selected_features_idxs]\n X_test_features = X_test_features[:, self.selected_features_idxs]\n return (X_train_features, Y_train), (X_test_features, Y_test)\n \n def setFilterCol(self,new_filter_col_name, logicOr = True):\n '''\n Sets a new filter column\n new_filter_col_name - col name of existing boolean column (filter according to new condition)\n logicOr - logic OR the new col with old filter col (if old exist), to combine old and new filters. 
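A hedged sketch of the contract get_dataset() relies on: non-null 'train'/'test' markers plus the boolean 'label_filter_out' column select the rows. The toy frame below stands in for the one normally built by load_df_and_features and make_dataset.

import pandas as pd

df = pd.DataFrame({
    'index_row': [0, 1, 2],
    'features': [[1, 0], [0, 1], [1, 1]],
    'label_num': [0, 1, 0],
    'train': [1, 1, None],   # rows 0 and 1 belong to the training split
    'test': [None, None, 1], # row 2 belongs to the test split
    'label_filter_out': [False, False, False],
})
ds = Dataset()
ds.df = df
(X_train, Y_train), (X_test, Y_test) = ds.get_dataset()
assert X_train.shape == (2, 2) and Y_test == [0]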
if False - only the new filter is in effect\n '''\n old_filter_col_name = getattr(self,'filer_col_name',None)\n if old_filter_col_name and logicOr:\n self.df[new_filter_col_name] = self.df[new_filter_col_name] | self.df[old_filter_col_name] \n \n self.filer_col_name = new_filter_col_name # set on this dataset, not the global config \n \n\ndef select_best_features(dataset_info, num_labels, num_best, verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset_info.ds.get_dataset(to_categorical=True, num_labels=num_labels)\n if verbose:\n print('\\nSelecting %d best features\\n'%num_best)\n selector = SelectKBest(chi2, k=num_best)\n selector.fit_transform(X_train, Y_train)\n dataset_info.ds.selected_features_idxs = selector.get_support(indices=True).tolist()\n\n return selector.scores_\n\ndef plot_feature_scores(feature_names,scores,limit_to=None,save_to=None,best=True):\n plt.figure()\n if best:\n plt.title(\"Best features\")\n else:\n plt.title(\"Worst features\")\n if limit_to is None:\n limit_to = len(feature_names)\n #for some reason index 0 always wrong\n scores = np.nan_to_num(scores)\n if best:\n indices = np.argsort(scores)[-limit_to:][::-1]\n else:\n indices = np.argsort(scores)[:limit_to]\n #indices = np.argpartition(scores,-limit_to)[-limit_to:]\n plt.bar(range(limit_to), scores[indices],color=\"r\", align=\"center\")\n plt.xticks(range(limit_to),np.array(feature_names)[indices],rotation='vertical')\n plt.xlim([-1, limit_to])\n plt.ylabel('Score')\n plt.xlabel('Word')\n plt.show(block=False)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n\n\ndef make_plot(x,y,title=None,x_name=None,y_name=None,save_to=None,color='b',new_fig=True):\n if new_fig:\n plt.figure()\n plot = plt.plot(x,y,color)\n if title is not None:\n plt.title(title)\n if x_name is not None:\n plt.xlabel(x_name)\n if y_name is not None:\n plt.ylabel(y_name)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n return plot\n\ndef make_plots(xs,ys,labels,title=None,x_name=None,y_name=None,y_bounds=None,save_to=None):\n colors = ('b', 'g', 'r', 'c', 'm', 'y', 'k')\n handles = []\n plt.figure()\n plt.hold(True)\n for i in range(len(labels)):\n plot, = make_plot(xs[i],ys[i],color=colors[i%len(colors)],new_fig=False)\n handles.append(plot)\n plt.legend(handles,labels)\n if title is not None:\n plt.title(title)\n if x_name is not None:\n plt.xlabel(x_name)\n if y_name is not None:\n plt.ylabel(y_name)\n if y_bounds is not None:\n plt.ylim(y_bounds)\n if save_to is not None:\n plt.savefig(save_to,bbox_inches='tight')\n plt.hold(False)\n\ndef get_baseline_dummy(dataset_info, num_labels,verbose=True):\n # use integer labels directly; the one-hot conversion is not needed here\n (X_train, Y_train), (X_test, Y_test) = dataset_info.ds.get_dataset()\n dummy = DummyClassifier()\n dummy.fit(X_train,Y_train)\n predictions = dummy.predict(X_test)\n accuracy = accuracy_score(Y_test,predictions)\n \n if verbose:\n print('Got baseline of %f with dummy classifier'%accuracy)\n\n return accuracy\n\ndef get_baseline_svm(dataset_info,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset_info.ds.get_dataset()\n linear = LinearSVC(penalty='l1',dual=False)\n grid_linear = GridSearchCV(linear, {'C':[0.1, 0.5, 1, 5, 10]}, cv=5)\n grid_linear.fit(X_train,Y_train)\n accuracy = grid_linear.score(X_test, Y_test)\n \n if verbose:\n print('Got baseline of %f with svm classifier'%accuracy)\n\n return accuracy\n\ndef 
get_baseline_knn(dataset_info,num_labels,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset_info.ds.get_dataset()\n knn = KNeighborsClassifier(n_neighbors=100,n_jobs=-1)\n knn.fit(X_train,Y_train)\n predictions = np.round(knn.predict(X_test))\n accuracy = accuracy_score(Y_test,predictions)\n\n if verbose:\n print('Got baseline of %f with knn classifier'%accuracy)\n\n return accuracy\n\ndef get_baseline_pa(dataset_info,verbose=True):\n (X_train, Y_train), (X_test, Y_test) = dataset_info.ds.get_dataset()\n classifier = PassiveAggressiveClassifier(n_jobs=-1,fit_intercept=True)\n classifier.fit(X_train,Y_train)\n accuracy = classifier.score(X_test,Y_test)\n \n if verbose:\n print('Got baseline of %f with Passive Aggressive classifier'%accuracy)\n\n return accuracy\n \ndef run_once(verbose=True,test_split=0.1,ftype='binary',num_words=10000,select_best=4000, plot=True,plot_prefix='',graph_to=None): \n # Prepare features\n dataset_info.ds = Dataset()\n\n load_df_and_features(dataset_info.csvEmailsFilePath,dataset_info, num_words=num_words,matrix_type=ftype,verbose=verbose, max_n=dataset_info.ngram_max) \n # Create new features \n if dataset_info.preprocess.modifyFeatureVector:\n dataset_info.ds.df = dataset_info.preprocess.modifyFeatureVector(dataset_info.ds.df,dataset_info) \n \n # User defined hook to optionally modify data before train/test split\n if dataset_info.hooks.afterFeatures:\n dataset_info.hooks.afterFeatures(dataset_info)\n \n num_labels = len(dataset_info.label_names) \n # Create dataset including splits, sub sampling, labels mapping\n # ((X_train,Y_train_c),(X_test,Y_test_c)),Y_train,Y_test,num_labels\n num_labels = make_dataset(dataset_info) \n if select_best and select_bestmaxacc:\n maxacc = acc\n maxtype = ftype\n accs.append(acc)\n all_baselines.append(baselines)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,types,title='Test accuracy vs max words',y_name='Test accuracy',x_name='Max most frequent words',save_to='word_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_accs,types,title='Test accuracy vs max words',y_name='Test accuracy',x_name='Max most frequent words',save_to='word_accs_zoomed.png',y_bounds=(0.6,0.95))\n make_plots(all_counts,all_baselines,types,title='Baseline accuracy vs max words',y_name='Baseline accuracy',x_name='Max most frequent words',save_to='word_baseline_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_times,types,title='Time vs max words',y_name='Parse+test+train time (seconds)',x_name='Max most frequent words',save_to='word_times.png')\n print('\\nBest word accuracy %f with features %s\\n'%(maxacc,maxtype))\n\ndef test_hidden_dropout():\n #get emails once to pickle\n emails = get_emails(dataset_info.csvEmailsFilePath,verbose=False)\n\n dropouts = [0.25,0.5,0.75]\n all_accs = []\n all_counts = []\n all_times = []\n maxacc = 0\n maxh = 0\n for d in dropouts:\n hidden = [32,64,128,256,512,1024,2048]\n all_counts.append(hidden)\n accs=[]\n times=[]\n print('\\nTesting learning for dropout %f with hidden counts %s\\n'%(d,str(hidden)))\n for h in hidden:\n start = time.time()\n acc = sum([run_once(dropout=d,num_words=2500,num_hidden=h,plot=False,verbose=False,select_best=None)[3] for i in range(5)])/5.0\n end = time.time()\n elapsed = 
(end-start)/5.0\n times.append(elapsed)\n print('\\nGot acc %f for hidden count %d in %d seconds'%(acc,h,elapsed))\n if acc>maxacc:\n maxacc = acc\n maxh = h\n accs.append(acc)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,['Droupout=%f'%d for d in dropouts],title='Test accuracy vs num hidden',y_name='Test accuracy',x_name='Number of hidden units',save_to='hidden_accs.png',y_bounds=(0,1))\n make_plots(all_counts,all_accs,['Droupout=%f'%d for d in dropouts],title='Test accuracy vs num hidden',y_name='Test accuracy',x_name='Number of hidden units',save_to='hidden_accs_zoomed.png',y_bounds=(0.8,1))\n make_plots(all_counts,all_times,['Droupout=%f'%d for d in dropouts],title='Time vs max words',y_name='Parse+test+train time (seconds)',x_name='Number of hidden units',save_to='hidden_times.png')\n print('\\nBest word accuracy %f with hidden %d\\n'%(maxacc,maxh))\n\ndef test_select_words(num_hidden=512):\n #get emails once to pickle\n emails = get_emails(dataset_info.csvEmailsFilePath,verbose=False)\n\n word_counts = [2500,3500,4500,5500]\n all_accs = []\n all_counts = []\n all_times = []\n maxacc = 0\n maxs = None\n for word_count in word_counts:\n select = [0.5,0.6,0.7,0.8,0.9]\n all_counts.append(select)\n accs=[]\n times=[]\n print('\\nTesting learning for word count %d with selects %s\\n'%(word_count,str(select)))\n for s in select:\n start = time.time()\n acc = sum([run_once(num_hidden=num_hidden,dropout=0.1,num_words=word_count,plot=False,verbose=False,select_best=int(s*word_count))[3] for i in range(5)])/5.0\n end = time.time()\n elapsed = (end-start)/5.0\n times.append(elapsed)\n print('\\nGot acc %f for select ratio %f in %d seconds'%(acc,s,elapsed))\n if acc>maxacc:\n maxacc = acc\n maxs = s\n accs.append(acc)\n all_times.append(times)\n all_accs.append(accs)\n print('\\nWord count accuracies:%s\\n'%str(accs))\n make_plots(all_counts,all_accs,['Words=%d'%w for w in word_counts],title='Test accuracy vs ratio of words kept',y_name='Test accuracy',x_name='Ratio of best words kept',save_to='select_accs_%d.png'%num_hidden,y_bounds=(0,1))\n make_plots(all_counts,all_accs,['Words=%d'%w for w in word_counts],title='Test accuracy vs ratio of words kept',y_name='Test accuracy',x_name='Ratio of best words kept',save_to='select_accs_zoomed_%d.png'%num_hidden,y_bounds=(0.8,1))\n make_plots(all_counts,all_times,['Words=%d'%w for w in word_counts],title='Time vs ratio of words kept',y_name='Parse+test+train time (seconds)',x_name='Ratio of best words kept',save_to='select_times_%d.png'%num_hidden,y_bounds=(0,65))\n print('\\nBest word accuracy %f with select %f\\n'%(maxacc,maxs))\n\n\n# ------- Experiments -----------------------------\n# True to run feature extraction, selection + svm baseline (~ 0.78)\n \nrun_baseline = False\n#test_features_words()\n#test_hidden_dropout()\n#test_select_words(128)\n#test_select_words(32)\n#test_select_words(16)\n\n# TODO: try ftype = 'tfidf'\n\ndef output_single_run_stats(df_test_metrics): \n print('Test single runs stats:')\n print(df_test_metrics)\n\n\ndef run_exp():\n '''\n Multiple runs of a single config (for avg stats)\n '''\n # Create metrics tracking dataframe for multiple runs, where each column is a metric (acc,prec,recall,f1 ...)\n import io\n \n metrics_columns=[mtr_name for mtr_name in dataset_info.metrics.report_metrics]\n metrics_dtype=[np.float for d in range(0,len(metrics_columns))]\n df_test_metrics = pd.read_csv(io.StringIO(\"\"), names=metrics_columns, 
dtype=dict(zip(metrics_columns,metrics_dtype))) # pd.DataFrame(columns=metrics_columns,dtype=metrics_dtype)\n dataset_info.state = MyObj()\n for i in range(0,dataset_info.num_runs):\n if len(dataset_info.random_seed) <= i:\n dataset_info.random_seed.append(int(time.time()))\n dataset_info.state.index_random_seed = i\n *dummy,new_metrics = run_once(num_words=dataset_info.vocab_size,ftype=dataset_info.feature_type,test_split=dataset_info.test_split, plot=False if dataset_info.num_runs > 1 else True, verbose=True,select_best=dataset_info.preprocess.select_best)\n df_test_metrics.loc[i] = [getattr(new_metrics,mtr_name) for mtr_name in dataset_info.metrics.report_metrics]\n \n print('random seed {}:', dataset_info.random_seed)\n output_single_run_stats(df_test_metrics)\n \n if hasattr(dataset_info, 'save_df') and dataset_info.save_df:\n write_csv('final_df.tsv', dataset_info.ds.df, verbose=True) \n return df_test_metrics\n\n\n########################## Running multipe configs (datset_info), each num_runs ########################################\ndef run_multi_exps_configs(exps): \n global dataset_info\n start_time = datetime.now()\n df_results = pd.DataFrame() # array of df - a df per config\n for exp in exps:\n dataset_info = exp.dataset_info\n df_test_metrics = run_exp()\n exp.tag_metrics(dataset_info,df_test_metrics)\n df_results = pd.concat([df_results,df_test_metrics])\n time_elapsed = datetime.now() - start_time\n print('Time elapsed (hh:mm:ss.ms) {}'.format(time_elapsed)) \n return df_results\n\nclass BaseExp:\n def __init__(self):\n self.dataset_info = init_config()\n def tag_metrics(self,dataset_info,df_test_metrics):\n pass\n\ndef createMultipleConfigExps(arrDctParams, getExpInstance):\n '''\n Return a list of experiments with different params, according to dctParams\n '''\n if type(arrDctParams)!=list or len(arrDctParams)== 0:\n raise Exception('createMultipleConfigExps: arrDctParams must be of type list and contain at least a single empty dict, otherwise 0 experiments will be returned')\n exps = []\n for dctPrms in arrDctParams:\n exp = getExpInstance()\n for prmKey in list(dctPrms.keys()):\n prmVal = dctPrms[prmKey] \n rsetattr(exp.dataset_info,prmKey,prmVal)\n exps.append(exp) \n return exps\n ", "sub_path": "pyScripts/kerasExperiments.py", "file_name": "kerasExperiments.py", "file_ext": "py", "file_size_in_byte": 25713, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "hpyutils.MyObj", "line_number": 27, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 59, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 60, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 68, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 69, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 78, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 79, "usage_type": "call"}, {"api_name": "mlClassify.logreg_classifier", "line_number": 80, "usage_type": "name"}, {"api_name": "hpyutils.MyObj", "line_number": 84, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 85, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 91, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 92, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 99, "usage_type": "call"}, {"api_name": "hpyutils.setattrs", "line_number": 100, "usage_type": "call"}, {"api_name": 
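A hedged sketch of a small parameter sweep with the helpers above; the dotted keys are illustrative and assume rsetattr resolves nested attributes of each experiment's dataset_info:

param_grid = [
    {'vocab_size': 5000, 'preprocess.select_best': 2000},
    {'vocab_size': 10000, 'preprocess.select_best': 4000},
]
exps = createMultipleConfigExps(param_grid, BaseExp)  # one BaseExp per dict
df_results = run_multi_exps_configs(exps)             # num_runs runs per config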
"keras.utils.np_utils.to_categorical", "line_number": 145, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 145, "usage_type": "name"}, {"api_name": "keras.utils.np_utils.to_categorical", "line_number": 151, "usage_type": "call"}, {"api_name": "keras.utils.np_utils", "line_number": 151, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 160, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 183, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 183, "usage_type": "argument"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "numpy.nan_to_num", "line_number": 198, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 202, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 204, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 204, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 205, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 205, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 206, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 206, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 207, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 208, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 208, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 209, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 209, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 217, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 217, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.figure", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 232, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 232, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 236, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 236, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 240, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 240, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 242, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 242, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 244, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 244, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 246, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 246, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hold", "line_number": 247, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 247, "usage_type": "name"}, {"api_name": "sklearn.dummy.DummyClassifier", "line_number": 251, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 254, "usage_type": "call"}, {"api_name": "sklearn.svm.LinearSVC", "line_number": 263, "usage_type": "call"}, {"api_name": "sklearn.model_selection.GridSearchCV", "line_number": 264, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 275, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 277, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 278, "usage_type": "call"}, {"api_name": "sklearn.linear_model.PassiveAggressiveClassifier", "line_number": 287, "usage_type": "call"}, {"api_name": "kerasClassify.load_df_and_features", "line_number": 300, "usage_type": "call"}, {"api_name": "kerasClassify.make_dataset", "line_number": 312, "usage_type": "call"}, {"api_name": "my_metrics.calc_metrics", "line_number": 335, "usage_type": "call"}, {"api_name": "my_metrics.print_metrics", "line_number": 342, "usage_type": "call"}, {"api_name": "my_metrics.plot_metrics", "line_number": 344, "usage_type": "call"}, {"api_name": "kerasClassify.get_emails", "line_number": 352, "usage_type": "call"}, {"api_name": "time.time", "line_number": 369, "usage_type": "call"}, {"api_name": "time.time", "line_number": 372, "usage_type": "call"}, {"api_name": "time.time", "line_number": 377, "usage_type": "call"}, {"api_name": "time.time", "line_number": 380, "usage_type": "call"}, {"api_name": "kerasClassify.get_emails", "line_number": 400, "usage_type": "call"}, {"api_name": "time.time", "line_number": 415, "usage_type": "call"}, {"api_name": "time.time", "line_number": 417, "usage_type": "call"}, {"api_name": "kerasClassify.get_emails", "line_number": 435, "usage_type": "call"}, {"api_name": "time.time", "line_number": 450, "usage_type": "call"}, {"api_name": "time.time", "line_number": 452, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 494, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 495, "usage_type": "call"}, {"api_name": 
"io.StringIO", "line_number": 495, "usage_type": "call"}, {"api_name": "hpyutils.MyObj", "line_number": 496, "usage_type": "call"}, {"api_name": "time.time", "line_number": 499, "usage_type": "call"}, {"api_name": "kerasClassify.write_csv", "line_number": 508, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 515, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 515, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 516, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 521, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 522, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 522, "usage_type": "name"}, {"api_name": "hpyutils.rsetattr", "line_number": 543, "usage_type": "call"}]} +{"seq_id": "592738830", "text": "import json\r\nimport requests\r\nimport time\r\nimport urllib\r\nfrom dbhelper import DBHelper\r\n\r\ndb = DBHelper()\r\n\r\nTOKEN = \"866622878:AAE_7BkgoyC3W6RUsnO-jAXtJDIbTGP6jTw\"\r\nURL = \"https://api.telegram.org/bot{}/\".format(TOKEN)\r\n\r\ndef get_url(url):\r\n response = requests.get(url)\r\n content = response.content.decode(\"utf8\") # necessary for some python versions\r\n return content\r\ndef get_json_from_url(url):\r\n content = get_url(url)\r\n js = json.loads(content)\r\n return js\r\ndef get_last_update_id(updates): # To get latest value of update id\r\n update_ids = []\r\n for update in updates[\"result\"]:\r\n update_ids.append(int(update[\"update_id\"]))\r\n return max(update_ids)\r\ndef get_updates(offset=None):\r\n url = URL + \"getUpdates\"\r\n if offset:\r\n url += \"?offset={}\".format(offset)\r\n js = get_json_from_url(url)\r\n return js\r\n\r\ndef handle_updates(updates):\r\n for update in updates[\"result\"]:\r\n text = update[\"message\"][\"text\"]\r\n chat = update[\"message\"][\"chat\"][\"id\"]\r\n checkpoint = db.get_items(chat)\r\n if text == \"/start\":\r\n keyboard = build_keyboard([\"Hi! Nice to meet you!\"])\r\n send_message(\"Hi there, I guess you found me! At the time you are reading this, Ryan should already be in Thailand. He's sorry he couldn't be with you... so he made me! My name is Mister Bot. It is a pleasure to meet you.\", chat, keyboard)\r\n db.add_item(\"0\", chat)\r\n elif text.startswith(\"/\"):\r\n continue\r\n elif \"0\" in checkpoint and text == \"Hi! Nice to meet you!\":\r\n keyboard = build_keyboard([\"Thanks! So... what brings you here?\"])\r\n send_message(\"Hi! omg please forgive me, i forgot to wish you earlier... Happy Birthday! I hope you have a great day ahead!\", chat, keyboard)\r\n db.delete_item(\"0\", chat)\r\n db.add_item(\"1\", chat)\r\n elif \"1\" in checkpoint and text == \"Thanks! So... what brings you here?\":\r\n keyboard = build_keyboard([\"What mission?\",\"Umm.. okay\"])\r\n send_message(\"Oh haha i am here on a mission...\", chat, keyboard)\r\n db.delete_item(\"1\", chat)\r\n db.add_item(\"2\", chat)\r\n elif \"2\" in checkpoint:\r\n if text == \"What mission?\":\r\n keyboard = build_keyboard([\"Ok!\",\"Nah bruh\"])\r\n send_message(\"I am here to deliver you instructions to your gift. So, listen up!\", chat, keyboard)\r\n elif text == \"Ok!\":\r\n keyboard = build_keyboard([\"Ok!\"])\r\n send_message(\"Nice! Let's begin\", chat, keyboard)\r\n db.delete_item(\"2\", chat)\r\n db.add_item(\"4\", chat)\r\n elif text == \"Nah bruh\":\r\n keyboard = build_keyboard([\"Yes\", \"No\"])\r\n send_message(\"Well then, I guess we can't start until you are interested. 
Are you?\", chat, keyboard)\r\n db.delete_item(\"2\", chat)\r\n db.add_item(\"3\", chat)\r\n elif text == \"Umm.. okay\":\r\n keyboard = build_keyboard([\"I am!\",\"Yeaa i am not\"])\r\n send_message(\"You dont seem very interested.. Maybe i shouldn't go on\", chat, keyboard)\r\n elif text == \"I am!\":\r\n keyboard = build_keyboard([\"Ok!\"])\r\n send_message(\"Nice! Let's begin\", chat, keyboard)\r\n db.delete_item(\"2\", chat)\r\n db.add_item(\"4\", chat)\r\n elif text == \"Yeaa i am not\":\r\n keyboard = build_keyboard([\"Yes\", \"No\"])\r\n send_message(\"Well then, I guess we can't start until you are interested. Are you?\", chat, keyboard)\r\n db.delete_item(\"2\", chat)\r\n db.add_item(\"3\", chat)\r\n elif \"3\" in checkpoint:\r\n if text == \"Yes\":\r\n keyboard = build_keyboard([\"Ok!\"])\r\n send_message(\"Nice! Let's begin\", chat, keyboard)\r\n db.delete_item(\"3\", chat)\r\n db.add_item(\"4\", chat)\r\n if text == \"No\":\r\n keyboard = build_keyboard([\"Yes\", \"No\"])\r\n send_message(\"Well then, I guess we can't start until you are interested. Are you?\", chat, keyboard)\r\n elif \"4\" in checkpoint:\r\n keyboard = build_keyboard([\"Ok!\"])\r\n send_message(\"So, firstly you have to get to this location... You know where it is LOL, Ryan's house. When you get to the door, tell me.\", chat, keyboard)\r\n db.delete_item(\"4\", chat)\r\n db.add_item(\"5\", chat)\r\n elif \"5\" in checkpoint:\r\n keyboard = build_keyboard([\"I have reached!\",\"I have not reached.\"])\r\n send_message(\"Have you reached?\", chat, keyboard)\r\n db.delete_item(\"5\", chat)\r\n db.add_item(\"6\", chat)\r\n elif \"6\" in checkpoint:\r\n if text == \"I have reached!\":\r\n keyboard = build_keyboard([\"Ok!\"])\r\n send_message(\"Nice! Look for at the top rack of the shoerack, you will find your present there!\", chat, keyboard)\r\n db.delete_item(\"6\", chat)\r\n db.add_item(\"7\", chat)\r\n if text == \"I have not reached.\":\r\n keyboard = build_keyboard([\"I have reached!\",\"I have not reached.\"])\r\n send_message(\"Have you reached?\", chat, keyboard)\r\n elif \"7\" in checkpoint:\r\n keyboard = build_keyboard([\"Bye!\"])\r\n send_message(\"Yay! You found your present! Go back and open it up! Bye! 
It was nice meeting you\", chat, keyboard)\r\n\r\n\r\n\r\ndef get_last_chat_id_and_text(updates):\r\n    num_updates = len(updates[\"result\"])\r\n    last_update = num_updates - 1\r\n    text = updates[\"result\"][last_update][\"message\"][\"text\"]\r\n    chat_id = updates[\"result\"][last_update][\"message\"][\"chat\"][\"id\"]\r\n    return (text, chat_id)\r\n\r\ndef build_keyboard(items): # To make a custom Telegram keyboard\r\n    keyboard = [[item] for item in items]\r\n    reply_markup = {\"keyboard\":keyboard, \"one_time_keyboard\": True}\r\n    return json.dumps(reply_markup)\r\n\r\ndef send_message(text, chat_id, reply_markup=None):\r\n    text = urllib.parse.quote_plus(text)\r\n    url = URL + \"sendMessage?text={}&chat_id={}&parse_mode=Markdown\".format(text, chat_id)\r\n    if reply_markup:\r\n        url += \"&reply_markup={}\".format(reply_markup)\r\n    get_url(url)\r\n\r\ndef main():\r\n    db.setup()\r\n    last_update_id = None\r\n    while True:\r\n        print('getting updates')\r\n        updates = get_updates(last_update_id)\r\n        if len(updates[\"result\"]) > 0:\r\n            last_update_id = get_last_update_id(updates) + 1\r\n            handle_updates(updates)\r\n        time.sleep(0.5)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()", "sub_path": "meesterr_bot.py", "file_name": "meesterr_bot.py", "file_ext": "py", "file_size_in_byte": 6679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "dbhelper.DBHelper", "line_number": 7, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 13, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 18, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 124, "usage_type": "call"}, {"api_name": "urllib.parse.quote_plus", "line_number": 127, "usage_type": "call"}, {"api_name": "urllib.parse", "line_number": 127, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 142, "usage_type": "call"}]} +{"seq_id": "251565405", "text": "from base64 import b64encode\nimport hashlib\nimport json\nimport time\n\nimport requests\n\nimport syslog\n\n\nclass sendTemplateSms():\n    '''Application for sending notification SMS'''\n    def __init__(self):\n        self.accountSid = 'aaf98f8951eb7f810151f189e5970ea3'\n        self.accountToken = 'cc36e2edc9e9443ca5f2ff6f5191dc33'\n        # self.appId = '8a48b551523a5c1201523e9e8f3b0a01'\n        self.serverIP = 'https://app.cloopen.com'\n        self.serverPort = '8883'\n        self.softVersion = '2013-12-26'\n\n    def send_template_sms(self, to, msg, template_id, app_name):\n        self.local_time = self.get_local_time()\n        if app_name == 'invest':\n            app_id = '8a48b551523a5c1201523e9e8f3b0a01'\n        else:\n            app_id = 'aaf98f895350b688015354bb7bf8072d'\n        url = '{}:{}{}'.format(self.serverIP, self.serverPort, self.get_request_header())\n        self.write_log('send_sms_url---time', '{}---{}'.format(url, self.local_time))\n        headers = self.get_http_header()\n        data = self.get_request_params(to, msg, template_id, app_id)\n        result = requests.post(url=url, data=json.dumps(data), headers=headers).text\n        self.write_log('send_sms_msg:{}'.format(to), '{}===>{}--{}'.format(result, msg, template_id))\n        if 'statusCode' in result:\n            statusCode = json.loads(result)['statusCode']\n            if statusCode == '000000':\n                return True\n        return False\n\n    def get_request_header(self):\n        return '/{}/Accounts/{}/SMS/TemplateSMS?sig={}'.format(self.softVersion, self.accountSid, self.get_sig())\n\n    def get_http_header(self):\n        auth_str = '{}:{}'.format(self.accountSid, self.local_time)\n        return {'Accept': 'application/json',\n                'Content-Type': 'application/json;charset=utf-8',\n                
'Content-Length': '1024',\n                'Authorization': b64encode(auth_str.encode()).decode()}\n\n    def get_request_params(self, to, data=['', ''], template_id='1', app_id=''):\n        request_params = {'to': to,\n                          'appId': app_id,\n                          'templateId': template_id,\n                          'datas': data}\n        return request_params\n\n    def get_sig(self):\n        '''Account Id + account auth token + timestamp'''\n        sig_str = '{}{}{}'.format(self.accountSid, self.accountToken, self.local_time)\n        return hashlib.md5(sig_str.encode()).hexdigest().upper()\n\n    def get_local_time(self):\n        return time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n\n    def write_log(self, method, msg):\n        syslog.openlog(method, syslog.LOG_LOCAL0)\n        syslog.syslog(syslog.LOG_INFO, msg)\n", "sub_path": "sbin/sms_ytx_sdk.py", "file_name": "sms_ytx_sdk.py", "file_ext": "py", "file_size_in_byte": 2657, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.post", "line_number": 31, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 31, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 34, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 47, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 59, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 62, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 62, "usage_type": "call"}, {"api_name": "time.time", "line_number": 62, "usage_type": "call"}, {"api_name": "syslog.openlog", "line_number": 65, "usage_type": "call"}, {"api_name": "syslog.LOG_LOCAL0", "line_number": 65, "usage_type": "attribute"}, {"api_name": "syslog.syslog", "line_number": 66, "usage_type": "call"}, {"api_name": "syslog.LOG_INFO", "line_number": 66, "usage_type": "attribute"}]} +{"seq_id": "51936629", "text": "import pymongo\nimport imp\nimport os\nimport sys\n\ndef error_exit(msg):\n    print(msg)\n    sys.exit(1)\n\n\ndef maintain(db_url, db_name, day):\n    uri_info = pymongo.uri_parser.parse_uri(db_url)\n    nodelist = uri_info[\"nodelist\"][0]\n    conn = pymongo.MongoClient(*nodelist)\n\n    db = conn[db_name]\n    if uri_info['username']:\n        if not db.authenticate(uri_info['username'], uri_info['password']):\n            error_exit(\"authenticate error\")\n\n    sys.path.append(os.path.dirname(os.path.realpath(__file__)))\n\n    db_check = __import__(\"db_check.startup\")\n    db_check.startup.start(db)\n\n    dayup = __import__(str(day) + \".startup\")\n    dayup.startup.start(db)\n", "sub_path": "maintain/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.exit", "line_number": 8, "usage_type": "call"}, {"api_name": "pymongo.uri_parser.parse_uri", "line_number": 12, "usage_type": "call"}, {"api_name": "pymongo.uri_parser", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pymongo.MongoClient", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path.append", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 21, "usage_type": "call"}]} +{"seq_id": "220923077", "text": "\nfrom magic.spell import CharEffectSpell, Spell, DelayedDamageSpell\nfrom magic.utilities import *\nimport random\nimport wolfpack\nfrom wolfpack.utilities 
import tobackpack, energydamage\n\ndef onLoad():\n\tEarthquake().register(57)\n\tEnergyVortex().register(58)\n\tResurrection().register(59)\n\tSummonAirElement().register(60)\n\tSummonDaemon().register(61)\n\tSummonEarthElement().register(62)\n\tSummonFireElement().register(63)\n\tSummonWaterElement().register(64)\n\nclass Earthquake(Spell):\n\tdef __init__(self):\n\t\tSpell.__init__(self, 8)\n\t\tself.reagents = {REAGENT_BLOODMOSS: 1, REAGENT_MANDRAKE: 1, REAGENT_GINSENG: 1, REAGENT_SULFURASH: 1}\n\t\tself.mantra = 'In Vas Por'\n\n\tdef cast(self, char, mode, args=[], target=None, item=None):\n\t\tif not self.consumerequirements(char, mode, args, target, item):\n\t\t\treturn\n\n\t\tif char.player:\n\t\t\tparty = char.party\n\t\t\tguild = char.guild\n\t\telse:\n\t\t\tparty = None\n\t\t\tguild = None\n\n\t\ttargets = []\n\n\t\tspellrange = 1 + int(char.skill[MAGERY] / 150.0)\n\t\tchars = wolfpack.chars(char.pos.x, char.pos.y, char.pos.map, spellrange)\n\t\tfor target in chars:\n\t\t\tif target == char:\n\t\t\t\tcontinue\n\n\t\t\tif (guild and target.guild == guild) or (party and target.party == party):\n\t\t\t\tcontinue\n\n\t\t\tif not char.canreach(target, spellrange):\n\t\t\t\tcontinue\n\n\t\t\ttargets.append(target)\n\n\t\tfor target in targets:\n\t\t\ttarget.soundeffect(0x2F3)\n\t\t\tself.harmchar(char, target)\n\n\t\t\tdamage = target.hitpoints / 2\n\t\t\tif target.player:\n\t\t\t\tdamage += random.randint(0, 15)\n\n\t\t\tdamage = min(100, max(15, damage))\n\t\t\tenergydamage(target, char, damage, physical=100)\n\nclass EnergyVortex(Spell):\n\tdef __init__(self):\n\t\tSpell.__init__(self, 8)\n\t\tself.reagents = {REAGENT_BLACKPEARL: 1, REAGENT_MANDRAKE: 1, REAGENT_BLOODMOSS: 1, REAGENT_NIGHTSHADE: 1}\n\t\tself.mantra = 'Vas Corp Por'\n\t\tself.validtarget = TARGET_GROUND\n\n\tdef target(self, char, mode, targettype, target, args, item):\n\t\tchar.turnto(target)\n\n\t\tif not self.consumerequirements(char, mode, args, target, item):\n\t\t\treturn\n\n\t\tev = wolfpack.addnpc('summoned_energy_vortex', target)\n\t\tev.summontime = wolfpack.time.currenttime() + 120000\n\t\tev.summoned = 1\n\t\tev.soundeffect(0x212)\n\nclass Resurrection(Spell):\n\tdef __init__(self):\n\t\tSpell.__init__(self, 8)\n\t\tself.reagents = {REAGENT_BLOODMOSS: 1, REAGENT_GARLIC: 1, REAGENT_GINSENG: 1}\n\t\tself.mantra = 'An Corp'\n\t\tself.validtarget = TARGET_CHAR\n\n\tdef target(self, char, mode, targettype, target, args, item):\n\t\tchar.turnto(target)\n\n\t\tif not target.player:\n\t\t\tchar.socket.clilocmessage(503348)\n\t\t\treturn\n\n\t\tif not target.dead:\n\t\t\tchar.socket.clilocmessage(501041)\n\t\t\treturn\n\n\t\tif not self.consumerequirements(char, mode, args, target, item):\n\t\t\treturn\n\n\t\ttarget.resurrect()\n\t\ttarget.soundeffect(0x214)\n\nclass SummonElementBase(Spell):\n\tdef __init__(self):\n\t\tSpell.__init__(self, 8)\n\t\tself.elementid = ''\n\t\tself.validtarget = TARGET_GROUND\n\t\tself.reagents = {REAGENT_BLOODMOSS: 1, REAGENT_MANDRAKE: 1, REAGENT_SPIDERSILK: 1}\n\t\tself.casttime = 6000\n\n\tdef target(self, char, mode, targettype, target, args, item):\n\t\tchar.turnto(target)\n\n\t\t# Lowest controlslots we see is 2, Earth Elemental\n\t\tif char.player and char.controlslots + 2 > 5:\n\t\t\tchar.socket.clilocmessage(1049645)\n\t\t\treturn\n\n\t\tif not self.consumerequirements(char, mode, args, target, item):\n\t\t\treturn\n\n\t\tcreature = wolfpack.addnpc(self.elementid, target)\n\t\t# If the creature is out of our control, delete it.\n\t\tif char.player and char.controlslots + 
creature.controlslots > 5:\n\t\t\tcreature.delete()\n\t\t\tchar.socket.clilocmessage(1049645)\n\t\telse:\n\t\t\tcreature.addscript('speech.pets')\n\t\t\tcreature.owner = char\n\t\t\tcreature.summontime = wolfpack.time.currenttime() + 120000\n\t\t\tcreature.summoned = 1\n\t\t\tcreature.soundeffect(0x217)\n\nclass SummonAirElement(SummonElementBase):\n\tdef __init__(self):\n\t\tSummonElementBase.__init__(self)\n\t\tself.mantra = 'Kal Vas Xen Hur'\n\t\tself.elementid = 'summoned_air_elemental'\n\t\tself.casttime = 6000\n\nclass SummonEarthElement(SummonElementBase):\n\tdef __init__(self):\n\t\tSummonElementBase.__init__(self)\n\t\tself.mantra = 'Kal Vas Xen Ylem'\n\t\tself.elementid = 'summoned_earth_elemental'\n\t\tself.casttime = 6000\n\nclass SummonFireElement(SummonElementBase):\n\tdef __init__(self):\n\t\tSummonElementBase.__init__(self)\n\t\tself.mantra = 'Kal Vas Xen Flam'\n\t\tself.reagents = {REAGENT_BLOODMOSS: 1, REAGENT_MANDRAKE: 1, REAGENT_SPIDERSILK: 1, REAGENT_SULFURASH: 1}\n\t\tself.elementid = 'summoned_fire_elemental'\n\t\tself.casttime = 6000\n\nclass SummonWaterElement(SummonElementBase):\n\tdef __init__(self):\n\t\tSummonElementBase.__init__(self)\n\t\tself.mantra = 'Kal Vas Xen An Flam'\n\t\tself.elementid = 'summoned_water_elemental'\n\t\tself.casttime = 6000\n\nclass SummonDaemon(SummonElementBase):\n\tdef __init__(self):\n\t\tSummonElementBase.__init__(self)\n\t\tself.mantra = 'Kal Vas Xen Corp'\n\t\tself.reagents = {REAGENT_BLOODMOSS: 1, REAGENT_MANDRAKE: 1, REAGENT_SPIDERSILK: 1, REAGENT_SULFURASH: 1}\n\t\tself.elementid = 'summoned_daemon'\n\t\tself.casttime = 6000\n", "sub_path": "tags/Release_12_9_9/server/release/scripts/magic/circle8.py", "file_name": "circle8.py", "file_ext": "py", "file_size_in_byte": 4891, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "magic.spell.Spell", "line_number": 18, "usage_type": "name"}, {"api_name": "magic.spell.Spell.__init__", "line_number": 20, "usage_type": "call"}, {"api_name": "magic.spell.Spell", "line_number": 20, "usage_type": "name"}, {"api_name": "wolfpack.chars", "line_number": 38, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 57, "usage_type": "call"}, {"api_name": "wolfpack.utilities.energydamage", "line_number": 60, "usage_type": "call"}, {"api_name": "magic.spell.Spell", "line_number": 62, "usage_type": "name"}, {"api_name": "magic.spell.Spell.__init__", "line_number": 64, "usage_type": "call"}, {"api_name": "magic.spell.Spell", "line_number": 64, "usage_type": "name"}, {"api_name": "wolfpack.addnpc", "line_number": 75, "usage_type": "call"}, {"api_name": "wolfpack.time.currenttime", "line_number": 76, "usage_type": "call"}, {"api_name": "wolfpack.time", "line_number": 76, "usage_type": "attribute"}, {"api_name": "magic.spell.Spell", "line_number": 80, "usage_type": "name"}, {"api_name": "magic.spell.Spell.__init__", "line_number": 82, "usage_type": "call"}, {"api_name": "magic.spell.Spell", "line_number": 82, "usage_type": "name"}, {"api_name": "magic.spell.Spell", "line_number": 104, "usage_type": "name"}, {"api_name": "magic.spell.Spell.__init__", "line_number": 106, "usage_type": "call"}, {"api_name": "magic.spell.Spell", "line_number": 106, "usage_type": "name"}, {"api_name": "wolfpack.addnpc", "line_number": 123, "usage_type": "call"}, {"api_name": "wolfpack.time.currenttime", "line_number": 131, "usage_type": "call"}, {"api_name": "wolfpack.time", "line_number": 131, "usage_type": "attribute"}]} +{"seq_id": 
"497704463", "text": "import requests\nfrom bs4 import BeautifulSoup\n\n# NOTE: For all the Data Retrieval Functions, a while loop is utilized to parse through the multiple pages\n# of data. The \"if not\" statement at the end of each loop checks the existence of data to parse through. If\n# the data does not exist, the loop will break. The race variable at the end of each loop is used to navigate\n# to the next page of available data in RuneClan.\n\n\n# ========================================= DATA RETRIEVAL FUNCTIONS =================================================\n\n\n# Opens RuneClan Hiscores page for our clan and scrapes RSN's and Total Levels. Information placed into lists\n# for use by main.\n\ndef name_and_level(race, list1, list2):\n\n while requests.get('http://www.runeclan.com/clan/the_nameless_crusade/hiscores/' + str(race) + '?skill=2'):\n\n raw_html1 = requests.get('http://www.runeclan.com/clan/the_nameless_crusade/hiscores/' + str(race) + '?skill=2')\n soup1 = BeautifulSoup(raw_html1.text, \"html.parser\")\n\n rsn = soup1.findAll(\"td\", {\"class\": \"clan_td clan_rsn2\"})\n total_lvl = soup1.findAll(\"td\", {\"class\": \"clan_td clan_xpgain_hs\"})\n\n for name in rsn:\n list1.append(name.find(\"a\").text)\n for level in total_lvl:\n list2.append(level.text.strip().replace(\",\", \"\"))\n\n if not rsn:\n break\n\n race += 1\n\n del list2[1::2]\n\n\n# Opens RuneClan Members page for our clan and scrapes the date in which players joined the clan.\n# Also scrapes the name associated with the date to match the data to the Total Level and RSN\n# scraped earlier. More on this in the combine_and_print function. All data is placed into lists\n# for use by main.\n\ndef date_joined(race, list1, list2):\n\n while requests.get('http://www.runeclan.com/clan/the_nameless_crusade/members/' + str(race) + \"?\"):\n\n raw_html2 = requests.get('http://www.runeclan.com/clan/the_nameless_crusade/members/' + str(race) + \"?\")\n soup2 = BeautifulSoup(raw_html2.text, \"html.parser\")\n\n player_name = soup2.findAll(\"span\", {\"class\": \"clan_rsn3_name\"})\n date_joined_ = soup2.findAll(\"span\", {\"class\": \"clan_rsn3_joined\"})\n\n for date in date_joined_:\n list1.append(date.text.strip())\n\n for name in player_name:\n list2.append(name.find(\"a\").text)\n\n if not date_joined_:\n break\n\n race += 1\n\n\n# Again, opens the RuneClan members page for our clan and scrapes the rank of each player. Rank is\n# placed into a list for main.\n\ndef get_rank(race, list1):\n\n while requests.get('http://www.runeclan.com/clan/the_nameless_crusade/members/' + str(race) + \"?\"):\n\n raw_html2 = requests.get('http://www.runeclan.com/clan/the_nameless_crusade/members/' + str(race) + \"?\")\n soup2 = BeautifulSoup(raw_html2.text, \"html.parser\")\n\n player_level = soup2.findAll(\"td\", {\"class\": \"clan_td clan_rank\"})\n\n for level in player_level:\n list1.append(level.text.strip())\n\n if not player_level:\n break\n\n race += 1\n\n\n# ======================================== DATA MANIPULATION FUNCTIONS ================================================\n\n\n# Takes the lists of information provided by previous functions, and matches it\n# using the sorting_funct so that all information can be iterated through together. list5 and\n# list6 are the reorganized versions of list5 and list2 (original join dates and original starting\n# ranks. 
These new lists are returned to main and rewrite the old lists.\n\ndef combine_and_reorganize(list1, list2, list3, list4):\n\n list5 = []\n list6 = []\n\n for name in list4:\n sorting_funct(name, list1, list3, list5, list6, list2)\n\n return list5, list6\n\n\n# Compares the name from the original list of names gathered and matches it to the same name in the second\n# list of names gathered. Then, the position of the name in the second list is recorded and used to index\n# the corresponding join date and rank. That information is then appended to the new lists to match the\n# initial list of names when printed.\n\ndef sorting_funct(value, compared_value, starting_date, end_result, end_result2, starting_rank):\n\n for index, name in enumerate(compared_value):\n if value == name:\n end_result.append(starting_date[index])\n end_result2.append(starting_rank[index])\n\n\n# Compares the rank, total level, and, where applicable, asks if the player is in discord for each player\n# and determines if an adjustment in rank needs to be made. This function DOES NOT take into account the date\n# joined aspect of our clan's ranking system. Thus, the date must be checked to match our criteria on a case\n# by case basis.\n\ndef rank_changes(list1, list2, list4):\n\n list5 = []\n\n for index, name in enumerate(list1):\n\n level = int(list2[index])\n\n if list4[index] == \"Admin\" or list4[index] == \"Organiser\" or list4[index] == \"Coordinator\" or list4[index] == \"Deputy Owner\" or list4[index] == \"Owner\":\n list5.append(\"N/A\")\n continue\n\n elif list4[index] == \"Captain\":\n response = input(\"Is \" + name + \" in the Discord? \")\n if response == \"Yes\" or response == \"yes\":\n list5.append(\"General\")\n if response == \"No\" or response == \"no\":\n list5.append(\"N/A\")\n\n elif (list4[index] == \"Recruit\" or list4[index] == \"Corporal\") and level > 599:\n list5.append(\"Sergeant\")\n\n elif list4[index] == \"Sergeant\" and level > 899:\n list5.append(\"Lieutenant\")\n\n elif list4[index] == \"Lieutenant\" and level > 1499:\n list5.append(\"Captain\")\n\n else:\n list5.append(\"N/A\")\n\n if list4[index] == list5[index]:\n list5[index] = \"N/A\"\n\n return list5\n\n\n# ======================================== DATA PRINTING FUNCTION ===================================================\n\n\n# Simply iterates through the relevant lists and prints out the data for each player. 
Also, adds a header to the\n# table and formats the output to be more aesthetically pleasing.\n\ndef print_to_file(list1, list2, list3, list4, list5):\n\n file = open(\"TNC_Data.txt\", \"w+\")\n\n file.write(\n \"RSN TOTAL LEVEL DATE JOINED RANK NEW RANK\\n\")\n file.write(\n \"==============================================================================================================\\n\")\n\n for i, j, k, m, n in zip(list1, list2, list3, list4, list5):\n line_print = \"{:<15} {:^15} {:<40} {:<20} {:>15}\".format(i, j, k, m, n)\n file.write(line_print + \"\\n\")\n\n file.close()\n", "sub_path": "Rank_Data.py", "file_name": "Rank_Data.py", "file_ext": "py", "file_size_in_byte": 6695, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 18, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 20, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 21, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 48, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 49, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 71, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 73, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}]} +{"seq_id": "566773771", "text": "# -*- coding: utf-8 -*-\nimport sys\nfrom PyQt4 import QtCore, QtGui, uic\nimport PixivUtil\nimport threading\nimport cache\nimport PixivNotifier\nfrom bs4 import BeautifulSoup\nimport requests\n\nDlgUI, QtBaseClass = uic.loadUiType(\"IllustDialog.ui\")\nbookmarkUrl = 'https://www.pixiv.net/bookmark_add.php?type=illust&illust_id='\n\nclass MyView(QtGui.QGraphicsView):\n\tdef __init__(self, parent = None):\n\t\tsuper(MyView, self).__init__(parent)\n\t\tself.setStyleSheet(qss)\n\t\tself.setFrameShape(QtGui.QFrame.NoFrame)\n\t\tself.factor = 1.0\n\t\n\tdef wheelEvent(self, e):\n\t\tif e.delta() < 0:\n\t\t\tself.factor /= 1.2\n\t\t\tself.scale(1.2 / 1.0, 1.2 / 1.0)\n\t\telse:\n\t\t\tself.factor *= 1.2\n\t\t\tself.scale(1.0 / 1.2, 1.0 / 1.2)\n\n\tdef mouseDoubleClickEvent(self, e):\n\t\tself.scale(self.factor, self.factor)\n\t\tself.factor = 1.0\n\nclass IllustDialog(QtGui.QMainWindow, DlgUI):\n\tdef __init__(self, title, id, url, bookmark, parent = None):\n\t\tsuper(IllustDialog, self).__init__(parent)\n\t\tself.setupUi(self)\n\t\tself.setWindowTitle(title)\n\t\tself.id = str(id)\n\t\tself.url = str(url)\n\t\tself.bookmarked = bookmark\n\t\tself.bookmark.clicked.connect(self.addBookmark)\n\t\tself.image = MyView(self)\n\t\tself.image.setGeometry(0, 0, 500, 500)\n\t\tself.refreshBookmarkState()\n\n\tdef setImage(self, img):\n\t\tscene = QtGui.QGraphicsScene()\n\t\tscene.addPixmap(img)\n\t\tself.image.setScene(scene)\n\t\tself.image.scale(1.0, 1.0)\n\n\tdef resizeEvent(self, e):\n\t\tself.control.setGeometry(0, e.size().height() - self.control.geometry().height(), \n\t\t\te.size().width(), self.control.geometry().height())\n\t\tself.image.setGeometry(0, 0, e.size().width(), \n\t\t\te.size().height() - self.control.geometry().height())\n\n\tdef addBookmark(self):\n\t\tif not self.bookmarked:\n\t\t\tpageHtml = PixivUtil.get(PixivUtil.pixiv.getServer(), \n\t\t\t\tbookmarkUrl + self.id, headers = PixivUtil.create_header(self.url)).text\n\t\t\tpage = BeautifulSoup(pageHtml, 'lxml')\n\t\t\ttoken = str(page.find('input', attrs = {'name': 
'tt'})['value'])\n\t\t\tform = {\n\t\t\t\t'mode': 'add',\n\t\t\t\t'tt': token, \n\t\t\t\t'id': self.id,\n\t\t\t\t'type': 'illust',\n\t\t\t\t'from_sid': '',\n\t\t\t\t'comment': '',\n\t\t\t\t'tag': '',\n\t\t\t\t'restrict': '0'\n\t\t\t}\n\t\telse:\n\t\t\tpass\n\t\tPixivUtil.post(PixivUtil.pixiv.getServer(), \n\t\t\tbookmarkUrl + self.id, headers = PixivUtil.create_header(self.url),\n\t\t\tdata = form\n\t\t)\n\t\tself.bookmarked = not self.bookmarked\n\t\tself.refreshBookmarkState()\n\n\tdef refreshBookmarkState(self):\n\t\tself.bookmark.setText(u'☆' if not self.bookmarked else u'★')\n\t\t\nqss = \"\"\"\nQGraphicsView\n{\n\tbackground: transparent;\n}\n\nQScrollBar:vertical\n{\n width:12px;\n background:rgba(0,0,0,0%);\n margin:0px,0px,0px,0px;\n padding-top:13px;\n padding-bottom:13px;\n}\nQScrollBar::handle:vertical\n{\n width:12px;\n background:rgba(0,0,0,25%);\n border-radius:6px;\n min-height:20;\n}\nQScrollBar::handle:vertical:hover\n{\n width:12px;\n background:rgba(0,0,0,50%); \n border-radius:6px;\n min-height:20;\n}\nQScrollBar::add-line:vertical \n{\n height:13px;width:12px;\n border-image:url(:/images/a/3.png);\n subcontrol-position:bottom;\n}\nQScrollBar::sub-line:vertical \n{\n height:13px;width:12px;\n border-image:url(:/images/a/1.png);\n subcontrol-position:top;\n}\nQScrollBar::add-line:vertical:hover \n{\n height:13px;width:12px;\n border-image:url(:/images/a/4.png);\n subcontrol-position:bottom;\n}\nQScrollBar::sub-line:vertical:hover \n{\n height:13px;width:12px;\n border-image:url(:/images/a/2.png);\n subcontrol-position:top;\n}\nQScrollBar::add-page:vertical,QScrollBar::sub-page:vertical \n{\n background:rgba(0,0,0,10%);\n border-radius:6px;\n}\n\nQScrollBar:horizontal\n{\n height:12px;\n background:rgba(0,0,0,0%);\n margin:0px,0px,0px,0px;\n padding-left:13px;\n padding-right:13px;\n}\nQScrollBar::handle:horizontal\n{\n height:12px;\n background:rgba(0,0,0,25%);\n border-radius:6px;\n min-width:20;\n}\nQScrollBar::handle:horizontal:hover\n{\n height:12px;\n background:rgba(0,0,0,50%); \n border-radius:6px;\n min-width:20;\n}\nQScrollBar::add-line:horizontal\n{\n height:12px;width:13px;\n border-image:url(:/images/a/3.png);\n subcontrol-position:right;\n}\nQScrollBar::sub-line:horizontal \n{\n height:12px;width:13px;\n border-image:url(:/images/a/1.png);\n subcontrol-position:left;\n}\nQScrollBar::add-line:horizontal:hover \n{\n height:12px;width:13px;\n border-image:url(:/images/a/4.png);\n subcontrol-position:right;\n}\nQScrollBar::sub-line:horizontal:hover \n{\n height:12px;width:13px;\n border-image:url(:/images/a/2.png);\n subcontrol-position:left;\n}\nQScrollBar::add-page:horizontal,QScrollBar::sub-page:horizontal\n{\n background:rgba(0,0,0,10%);\n border-radius:6px;\n}\n\"\"\"\n\nif __name__ == \"__main__\":\n\tapp = QtGui.QApplication(sys.argv)\n\tw = IllustDialog()\n\tw.show()\n\tsys.exit(app.exec_())", "sub_path": "IllustDialog.py", "file_name": "IllustDialog.py", "file_ext": "py", "file_size_in_byte": 4749, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "PyQt4.uic.loadUiType", "line_number": 11, "usage_type": "call"}, {"api_name": "PyQt4.uic", "line_number": 11, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGraphicsView", "line_number": 14, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 14, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QFrame", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 18, "usage_type": 
"name"}, {"api_name": "PyQt4.QtGui.QMainWindow", "line_number": 33, "usage_type": "attribute"}, {"api_name": "PyQt4.QtGui", "line_number": 33, "usage_type": "name"}, {"api_name": "PyQt4.QtGui.QGraphicsScene", "line_number": 47, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 47, "usage_type": "name"}, {"api_name": "PixivUtil.get", "line_number": 60, "usage_type": "call"}, {"api_name": "PixivUtil.pixiv.getServer", "line_number": 60, "usage_type": "call"}, {"api_name": "PixivUtil.pixiv", "line_number": 60, "usage_type": "attribute"}, {"api_name": "PixivUtil.create_header", "line_number": 61, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 62, "usage_type": "call"}, {"api_name": "PixivUtil.post", "line_number": 76, "usage_type": "call"}, {"api_name": "PixivUtil.pixiv.getServer", "line_number": 76, "usage_type": "call"}, {"api_name": "PixivUtil.pixiv", "line_number": 76, "usage_type": "attribute"}, {"api_name": "PixivUtil.create_header", "line_number": 77, "usage_type": "call"}, {"api_name": "PyQt4.QtGui.QApplication", "line_number": 198, "usage_type": "call"}, {"api_name": "PyQt4.QtGui", "line_number": 198, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 198, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "621627387", "text": "from typing import List, Tuple\nfrom .poker import show_middle_card\n\n\nasync def summary_phase(players, play_time, players_status: List[str], ctx):\n msg = ''\n for player, status in zip(players, players_status):\n if status == 'f':\n status_msg = 'หมอบ'\n elif status == 'p':\n status_msg = 'ผ่าน'\n elif status == 'b':\n status_msg = 'เกทับ'\n\n msg += str(player) + ' ' + status_msg + ' '\n await ctx.send(f'เฟสที่ {play_time+1}/3\\nสรุปผล {msg}\\n'+'-'*15)\n\n\nasync def pass_bet_fold(players, players_status, count_fold, max_current_bet, ctx, client):\n def check_pbf(msg):\n return msg.author == player and msg.channel == ctx.channel and \\\n msg.content.lower() in [\"p\", \"b\", \"f\"]\n\n global check_bet\n\n def check_bet(msg_bet):\n return msg_bet.author == msg_author\n\n while True:\n for idx_player, player in enumerate(players):\n global msg_author\n print(players_status)\n if players_status[idx_player] == 'f':\n continue\n\n await ctx.send(f'คุณ {str(player)} โปรดเลือก P/B/F')\n msg = await client.wait_for('message', check=check_pbf)\n msg_content = msg.content.lower()\n msg_author = msg.author\n\n if msg_content == 'f': # หมอบ\n count_fold += 1\n idx_player_drop.append(idx_player)\n players_status[idx_player] = 'f'\n await ctx.send(f'{str(player)} หมอบ')\n\n elif msg_content == 'p':\n players_status[idx_player] = 'p'\n await ctx.send(f'{str(player)} ผ่าน')\n\n elif msg_content == 'b':\n while True:\n await ctx.send(f'คุณ {str(player)} โปรดเดิมพัน')\n\n msg_bet = await client.wait_for('message', check=check_bet)\n msg_bet_content = msg_bet.content\n\n if not msg_bet_content.isnumeric():\n await ctx.send(f'โปรดใช้ตัวเลข')\n continue\n\n if int(msg_bet_content) < max_current_bet:\n await ctx.send(f'โปรดเดิมพันให้สูงกว่าหรือเท่ากับ {max_current_bet}')\n continue\n\n max_current_bet = int(msg_bet_content)\n await ctx.send(f'คุณ {str(msg_bet.author)} ได้เดิมพันเพิ่มเป็น {max_current_bet}')\n players_status[idx_player] = 'b'\n\n break\n\n if count_fold == len(players)-1:\n return True\n if 'b' not in players_status:\n break\n\n\nasync def loop_pass_bet_fold(players, player_cards: List[Tuple[int, int]], middle_cards, client, ctx):\n global player, 
idx_player_drop, max_current_bet, count_fold\n    count_fold = 0\n    idx_player_drop = []\n    max_current_bet = -1\n\n    players_status = ['p'] * len(players)\n    await ctx.send('Entering the betting phase\\nType P or p to pass\\nB or b to raise\\nF or f to fold')\n\n    for play_time in range(3):\n        await ctx.send(f'Phase {play_time+1}/3')\n        found_winner = await pass_bet_fold(players, players_status, count_fold,\n                                           max_current_bet, ctx, client)\n        await summary_phase(players, play_time, players_status, ctx)\n        if found_winner:\n            await show_middle_card(middle_cards, ctx, True, False)\n            await show_middle_card(middle_cards, ctx, True, True)\n            return players_status\n\n        if play_time == 0:\n            await show_middle_card(middle_cards, ctx, True, False)\n        elif play_time == 1:\n            await show_middle_card(middle_cards, ctx, True, True)\n\n    return players_status\n", "sub_path": "src/poker/user_action.py", "file_name": "user_action.py", "file_ext": "py", "file_size_in_byte": 4145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.List", "line_number": 5, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 78, "usage_type": "name"}, {"api_name": "poker.show_middle_card", "line_number": 93, "usage_type": "call"}, {"api_name": "poker.show_middle_card", "line_number": 94, "usage_type": "call"}, {"api_name": "poker.show_middle_card", "line_number": 98, "usage_type": "call"}, {"api_name": "poker.show_middle_card", "line_number": 100, "usage_type": "call"}]} +{"seq_id": "378505834", "text": "import random\nimport statistics\nimport numpy as np\n\n\nclass ModelBayesFast:\n    def __init__(self, n_people, n_buckets, s_max, options):\n        self.n_people = n_people\n        self.n_buckets = n_buckets\n        self.buckets = np.arange(-n_buckets, n_buckets + 1)\n        self.buckets_total = 2 * n_buckets + 1\n        self.s_max = s_max\n        self.s_max_total = 2 * s_max + 1\n        self.scores = np.arange(-s_max, s_max + 1)\n        self.options = options\n        self.proba_table = self.compute_proba_table(n_buckets, s_max, options)\n        self.initializor = {i: 1/(2*n_buckets + 1) for i in range(-n_buckets, n_buckets + 1)}\n        self.probabilities = [ 1/self.buckets_total * np.ones((self.buckets_total,)) for _ in range(n_people)]\n        self.esperance = []\n        self.mean = 0\n        self.update_stats()\n\n    def compute_proba_table(self, n_buckets, s_max, options):\n        def fn(l1, l2, s, rho, bpgd):\n            ecart = int(abs((l1 - l2)/bpgd - s))\n            if ecart > 5:\n                return 0.000001\n            return [24, 22, 10, 4.5, 1.5, 0.5][ecart]\n        rho = options['rho']\n        bpgd = options['bucket_per_gd']\n        triplet = {(l1, l2, s): max(fn(l1, l2, s, rho, bpgd), 0.0000001) if abs(l1 - l2 - s) < 6 else 0.0000001\n                   for l1 in range(-n_buckets, n_buckets + 1)\n                   for l2 in range(-n_buckets, n_buckets + 1)\n                   for s in range(-s_max, s_max + 1)}\n        sums = {(l1, l2): 0\n                for l1 in range(-n_buckets, n_buckets + 1)\n                for l2 in range(-n_buckets, n_buckets + 1)}\n        for (l1, l2, _), p in triplet.items():\n            sums[(l1, l2)] += p\n        triplet = {(l1, l2, s): v / sums[(l1, l2)] for (l1, l2, s), v in triplet.items()}\n        np_triplet = np.zeros((self.s_max_total, self.buckets_total, self.buckets_total))\n        for (l1, l2, s), v in triplet.items():\n            np_triplet[s + self.s_max][l1 + self.n_buckets][l2 + self.n_buckets] = v\n        return np_triplet\n\n    def update_stats(self):\n        self.esperance = [np.sum(self.buckets * self.probabilities[people]) for people in range(self.n_people)]\n        self.mean = 
statistics.mean(self.esperance)\n\n def adjust_mean(self):\n shift = round(self.mean)\n if shift == 0:\n return\n for people, probability in enumerate(self.probabilities):\n new_values = np.zeros(probability.shape)\n l = len(new_values)\n self.probabilities[people][:l - shift] = new_values[shift:]\n self.probabilities[people][l - shift:] = new_values[:shift]\n self.update_stats()\n\n def proba_score(self, user_1, user_2):\n p_user1 = self.probabilities[user_1]\n p_user1.shape = (len(p_user1), 1)\n p_user2 = self.probabilities[user_2]\n return {\n s: np.sum(self.proba_table[s + self.s_max] * p_user1 * p_user2) for s in range(-self.s_max, self.s_max + 1)\n }\n\n def print(self, teams, keep=set()):\n print(\"Team |\", end=\"\")\n print(\" # |\", end=\"\")\n for bucket in range(-self.n_buckets, self.n_buckets + 1):\n print(\"{0:^4} | \".format(bucket), end='')\n print()\n for people in range(self.n_people):\n if len(keep) > 0 and people not in keep:\n continue\n total = 0\n average = 0\n print(\"{0:^25}|\".format(teams[people]), end=\"\")\n print(\"{0:^5}|\".format(people), end=\"\")\n for bucket in range(-self.n_buckets, self.n_buckets + 1):\n print(\"{0:^4.1f} | \".format(100 * self.probabilities[people][bucket + self.n_buckets]), end='')\n total += 100 * self.probabilities[people][bucket + self.n_buckets]\n average += self.probabilities[people][bucket + self.n_buckets] * bucket\n print(\"{0:^4.1f} | \".format(total), end='')\n print(\"{0:^4.1f} | \".format(average), end='')\n print()\n print(\"Mean {:.2f}\".format(self.mean))\n\n def account_for(self, d):\n user_1, user_2, score = d\n p_user1 = self.probabilities[user_1]\n p_user1.shape = (len(p_user1), 1)\n p_user2 = self.probabilities[user_2]\n probabilities = self.proba_table[score + self.s_max] * p_user1 * p_user2\n # normalize\n s = np.sum(probabilities)\n probabilities = probabilities / s\n # accumulate probabilities per user\n self.probabilities[user_1] = np.sum(probabilities, axis = 1)\n self.probabilities[user_2] = np.sum(probabilities, axis = 0)\n self.update_stats()\n self.adjust_mean()\n\n\ndef draw_ps(ps):\n return random.choices(list(ps.keys()), weights=list(ps.values()))[0]\n\n'''\n r = random.random()\n s, t = 0, 0\n for s, p in ps.items():\n t += p\n if t > r:\n return s\n return s\n'''\n\ndef print_p(proba, threshold=0.02):\n print(\"Probabilities by score\")\n [print(\"{:^5d}|\".format(i), end='') for i, p in proba.items() if p > threshold]\n print()\n [print(\"{:^5.0f}|\".format(p * 100), end='') for i, p in proba.items() if p > threshold]\n print()\n", "sub_path": "modelbayesfast.py", "file_name": "modelbayesfast.py", "file_ext": "py", "file_size_in_byte": 5145, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.arange", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 47, "usage_type": "call"}, {"api_name": "statistics.mean", "line_number": 48, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 102, "usage_type": 
"call"}, {"api_name": "random.choices", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "397310369", "text": "from __future__ import print_function\nimport os\nimport sys\nimport time\nfrom datetime import timedelta\nimport pickle\nimport zipfile\nimport logging\n\nimport numpy as np\nimport pandas as pd\nimport parmap\n\nimport matplotlib\nmatplotlib.use('Agg')\n\n\nfrom model_ingest import ingest_spreadsheet\nfrom sifraclasses import Scenario\nfrom sifra.modelling.hazard_levels import HazardLevels\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\nfrom colorama import Fore, Back, Style\n\n\ndef run_scenario(config_file):\n \"\"\"\n Run a scenario by constructing a facility, and executing a scenario, with\n the parameters read from the config file.\n :param config_file: Scenario setting values and the infrastructure configuration file path\n :return: None\n \"\"\"\n # Construct the scenario object\n print(Style.BRIGHT + Fore.GREEN +\n \"\\nLoading scenario config... \" +\n Style.RESET_ALL, end='')\n\n scenario = Scenario(config_file)\n print(Style.BRIGHT + Fore.GREEN + \"Done.\" +\n \"\\nInitiating model run...\\n\" + Style.RESET_ALL)\n code_start_time = time.time()\n\n # `IFSystem` object that contains a list of components\n infrastructure = ingest_spreadsheet(config_file)\n\n post_processing_list = calculate_response(scenario, infrastructure)\n # After the response has been calculated the post processing\n # will record the results\n post_processing(infrastructure, scenario, post_processing_list)\n\n\ndef calculate_response(scenario, infrastructure):\n \"\"\"\n The response will be calculated by creating the hazard_levels,\n iterating through the range of hazards and calling the infrastructure systems\n expose_to method. This will return the results of the infrastructure to each hazard level\n exposure. A parameter in the scenario file determines whether the parmap.map function spawns threads\n that will perform parallel calculations.\n :param scenario: Parameters for the simulation.\n :param infrastructure: Model of the infrastructure.\n :return: List of results for each hazard level.\n \"\"\"\n hazard_levels = HazardLevels(scenario) # Hazard intensity Value, &\n # Parameter, Unit\n\n code_start_time = time.time() # start of the overall response calculation\n # capture the results from the map call in a list\n hazard_level_response = []\n # Use the parallel option in the scenario to determine how to run\n hazard_level_response.extend(parmap.map(run_para_scen,\n hazard_levels.hazard_range(),\n infrastructure,\n scenario,\n parallel=scenario.run_parallel_proc))\n # combine the responses into one list\n post_processing_list = [{}, # hazard level vs component damage state index\n {}, # hazard level vs infrastructure output\n {}, # hazard level vs component response\n [], # infrastructure output for sample\n [], # infrastructure econ loss for sample\n []] # infrastructure output given recovery\n # iterate through the hazard levels\n for hazard_level_values in hazard_level_response:\n # iterate through the hazard level lists\n for key, value_list in hazard_level_values.items():\n for list_number in range(6):\n # the first three lists are dicts\n if list_number <= 2:\n post_processing_list[list_number]['%0.3f' % np.float(key)] \\\n = value_list[list_number]\n else:\n # the last three are lists\n post_processing_list[list_number]. 
\\\n                    append(value_list[list_number])\n\n    # Convert the last 3 lists into arrays\n    for list_number in range(3, 6):\n        post_processing_list[list_number] \\\n            = np.array(post_processing_list[list_number])\n\n    # Convert the calculated output array into the correct format\n    post_processing_list[3] = np.sum(post_processing_list[3], axis=2).transpose()\n    post_processing_list[4] = post_processing_list[4].transpose()\n    post_processing_list[5] = np.transpose(post_processing_list[5], axes=(1, 0, 2))\n\n    elapsed = timedelta(seconds=(time.time() - code_start_time))\n    logging.info(\"[ Run time: %s ]\\n\" % str(elapsed))\n\n    return post_processing_list\n\n\ndef run_para_scen(hazard_level, infrastructure, scenario):\n    \"\"\"\n    The parmap.map function requires a module level function as a parameter.\n    So this function satisfies that requirement by calling the infrastructure's\n    expose_to method within this one.\n    :param hazard_level: The hazard level that the infrastructure will be exposed to\n    :param infrastructure: The infrastructure model that is being simulated\n    :param scenario: The Parameters for the simulation\n    :return: List of results of the simulation\n    \"\"\"\n    return infrastructure.expose_to(hazard_level, scenario)\n\n\n# ****************************************************************************\n# BEGIN POST-PROCESSING ...\n# ****************************************************************************\n\ndef plot_mean_econ_loss(sc, economic_loss_array):\n    \"\"\"Draws and saves a boxplot of mean economic loss\"\"\"\n\n    hazvals_ext = [[str(i)] * sc.num_samples\n                   for i in list(sc.hazard_intensity_vals)]\n    x1 = np.ndarray.flatten(np.array(hazvals_ext))\n\n    smpl = range(1, sc.num_samples+1, 1)\n    x2 = np.array(smpl * sc.num_hazard_pts)\n\n    arrays = [x1, x2]\n    econ_loss = np.array(economic_loss_array)\n    econ_loss = np.ndarray.flatten(econ_loss.transpose())\n    econ_loss_flat = np.ndarray.flatten(econ_loss)\n\n    econ_loss_df = pd.DataFrame(econ_loss_flat, index=arrays)\n    econ_loss_df.index.names = ['Hazard Intensity', 'Sample Num']\n    econ_loss_df.columns = ['Econ Loss Ratio']\n\n    fig = plt.figure(figsize=(9, 5), facecolor='white')\n    sns.set(style='ticks', palette='Set2')\n    # whitesmoke='#F5F5F5', coral='#FF7F50'\n    ax = sns.boxplot(x=x1, y='Econ Loss Ratio', data=econ_loss_df,\n                     linewidth=0.8, color='whitesmoke',\n                     showmeans=True,\n                     meanprops=dict(marker='o',\n                                    markeredgecolor='coral',\n                                    markerfacecolor='coral')\n                     )\n\n    sns.despine(bottom=False, top=True, left=True, right=True, offset=10)\n    ax.spines['bottom'].set_linewidth(0.8)\n    ax.spines['bottom'].set_color('#555555')\n\n    ax.yaxis.grid(True, which=\"major\", linestyle='-',\n                  linewidth=0.4, color='#B6B6B6')\n\n    ax.tick_params(axis='x', bottom='on', top='off',\n                   width=0.8, labelsize=8, pad=5, color='#555555')\n    ax.tick_params(axis='y', left='off', right='off',\n                   width=0.8, labelsize=8, pad=5, color='#555555')\n\n    ax.set_xticklabels(sc.hazard_intensity_vals)\n    intensity_label \\\n        = sc.intensity_measure_param+' ('+sc.intensity_measure_unit+')'\n    ax.set_xlabel(intensity_label, labelpad=9, size=10)\n    ax.set_ylabel('Loss Fraction (%)', labelpad=9, size=10)\n\n    ax.set_title('Loss Ratio', loc='center', y=1.04)\n    ax.title.set_fontsize(12)\n\n    figfile = os.path.join(sc.output_path, 'fig_lossratio_boxplot.png')\n    plt.savefig(figfile, format='png', bbox_inches='tight', dpi=300)\n    plt.close(fig)\n\n\ndef post_processing(infrastructure, scenario, response_list):\n    \"\"\"\n    Post simulation processing.\n\n    After the simulation has run the results are aggregated, 
saved\n and the system fragility is calculated.\n :param infrastructure: The infrastructure being simulated\n :param scenario: Scenario values for the simulation\n :param response_list: Values from the simulation\n :return: None\n \"\"\"\n write_system_response(response_list, scenario)\n loss_by_comp_type(response_list, infrastructure, scenario)\n economic_loss_array = response_list[4]\n plot_mean_econ_loss(scenario, economic_loss_array)\n pe_by_component_class(response_list, infrastructure, scenario)\n\ndef write_system_response(response_list, scenario):\n\n # ------------------------------------------------------------------------\n # 'ids_comp_vs_haz' is a dict of numpy arrays\n # We pickle it for archival. But the file size can get very large.\n # So we zip it for archival and delete the original\n # ------------------------------------------------------------------------\n idshaz = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.pickle')\n id_comp_vs_haz = response_list[0]\n with open(idshaz, 'w') as handle:\n for response_key in sorted(id_comp_vs_haz.keys()):\n pickle.dump({response_key: id_comp_vs_haz[response_key]}, handle)\n idshaz_zip = os.path.join(scenario.raw_output_dir, 'ids_comp_vs_haz.zip')\n zipmode = zipfile.ZIP_DEFLATED\n with zipfile.ZipFile(idshaz_zip, 'w', zipmode) as zip:\n zip.write(idshaz)\n os.remove(idshaz)\n\n # ------------------------------------------------------------------------\n # System output file (for given hazard transfer parameter value)\n # ------------------------------------------------------------------------\n sys_output_dict = response_list[1]\n sod_pkl = os.path.join(scenario.raw_output_dir,\n 'sys_output_dict.pickle')\n with open(sod_pkl, 'w') as handle:\n for response_key in sorted(sys_output_dict.keys()):\n pickle.dump({response_key: sys_output_dict[response_key]},\n handle)\n\n sys_output_df = pd.DataFrame(sys_output_dict)\n sys_output_df = sys_output_df.transpose()\n sys_output_df.index.name = 'Hazard Intensity'\n\n outfile_sysoutput = os.path.join(scenario.output_path,\n 'system_output_vs_haz_intensity.csv')\n sys_output_df.to_csv(outfile_sysoutput,\n sep=',',\n index_label=[sys_output_df.index.name])\n\n # ------------------------------------------------------------------------\n # Hazard response for component instances, i.e. 
components as-installed\n # ------------------------------------------------------------------------\n component_resp_dict = response_list[2]\n crd_pkl = os.path.join(scenario.raw_output_dir,\n 'component_resp_dict.pickle')\n with open(crd_pkl, 'w') as handle:\n for response_key in sorted(component_resp_dict.keys()):\n pickle.dump({response_key: component_resp_dict[response_key]},\n handle)\n\n\ndef loss_by_comp_type(response_list, infrastructure, scenario):\n \"\"\"\n Aggregate the economic loss statistics by component type.\n :param response_list: list of simulation results\n :param infrastructure: simulated infrastructure\n :param scenario: values used in simulation\n :return: None\n \"\"\"\n # ------------------------------------------------------------------------\n # Loss calculations by Component Type\n # ------------------------------------------------------------------------\n tp_ct = []\n for comp_type in infrastructure.get_component_types():\n tp_ct.extend(\n ((comp_type, 'loss_mean'), (comp_type, 'loss_std'),\n (comp_type, 'loss_tot'), (comp_type, 'func_mean'),\n (comp_type, 'func_std'))\n )\n\n mindex = pd.MultiIndex.from_tuples(\n tp_ct,\n names=['component_type', 'response'])\n comptype_resp_df = pd.DataFrame(\n index=mindex,\n columns=[scenario.hazard_intensity_str])\n comptype_resp_dict = comptype_resp_df.to_dict()\n\n component_resp_dict = response_list[2]\n for p in scenario.hazard_intensity_str:\n\n for component_type in infrastructure.get_component_types():\n\n components_of_type \\\n = list(infrastructure.get_components_for_type(component_type))\n\n ct_loss_mean_list \\\n = [component_resp_dict[p][(comp_id, 'loss_mean')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'loss_mean')] \\\n = np.mean(ct_loss_mean_list)\n\n ct_loss_mean_list \\\n = [component_resp_dict[p][(comp_id, 'loss_mean')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'loss_tot')] \\\n = np.sum(ct_loss_mean_list)\n\n ct_loss_std_list \\\n = [component_resp_dict[p][(comp_id, 'loss_std')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'loss_std')] \\\n = np.mean(ct_loss_std_list)\n\n ct_func_mean_list \\\n = [component_resp_dict[p][(comp_id, 'func_mean')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'func_mean')] \\\n = np.mean(ct_func_mean_list)\n\n ct_func_std_list \\\n = [component_resp_dict[p][(comp_id, 'func_std')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'func_std')] \\\n = np.mean(ct_func_std_list)\n\n ct_num_failures_list \\\n = [component_resp_dict[p][(comp_id, 'num_failures')]\n for comp_id in components_of_type]\n\n comptype_resp_dict[p][(component_type, 'num_failures')] \\\n = np.mean(ct_num_failures_list)\n\n # ------------------------------------------------------------------------\n # Calculating system fragility:\n economic_loss_array = response_list[4]\n sys_frag = np.zeros_like(economic_loss_array, dtype=int)\n if_system_damage_states = infrastructure.get_dmg_scale_bounds(scenario)\n for j, hazard_level in enumerate(scenario.hazard_intensity_str):\n for i in range(scenario.num_samples):\n # system output and economic loss\n sys_frag[i, j] = \\\n np.sum(economic_loss_array[i, j] > if_system_damage_states)\n\n # Calculating Probability of Exceedence:\n pe_sys_econloss = np.zeros(\n (len(infrastructure.get_system_damage_states()),\n scenario.num_hazard_pts)\n )\n for j in range(scenario.num_hazard_pts):\n for i in 
range(len(infrastructure.get_system_damage_states())):\n            pe_sys_econloss[i, j] = \\\n                np.sum(sys_frag[:, j] >= i) / float(scenario.num_samples)\n\n    # --- Output File --- response of each COMPONENT TYPE to hazard ---\n    outfile_comptype_resp = os.path.join(\n        scenario.output_path, 'comptype_response.csv')\n    comptype_resp_df = pd.DataFrame(comptype_resp_dict)\n    comptype_resp_df.index.names = ['component_type', 'response']\n    comptype_resp_df.to_csv(\n        outfile_comptype_resp, sep=',',\n        index_label=['component_type', 'response']\n    )\n\n    # --- Output File --- mean loss of component type ---\n    outfile_comptype_loss = os.path.join(\n        scenario.output_path, 'comptype_meanloss.csv')\n    comptype_loss_df = comptype_resp_df.iloc[\n        comptype_resp_df.index.get_level_values(1) == 'loss_mean']\n    comptype_loss_df.reset_index(level='response', inplace=True)\n    comptype_loss_df = comptype_loss_df.drop('response', axis=1)\n    comptype_loss_df.to_csv(\n        outfile_comptype_loss, sep=',',\n        index_label=['component_type']\n    )\n\n    # --- Output File --- mean failures for component types ---\n    outfile_comptype_failures = os.path.join(\n        scenario.output_path, 'comptype_meanfailures.csv')\n    comptype_failure_df = comptype_resp_df.iloc[\n        comptype_resp_df.index.get_level_values(1) == 'num_failures']\n    comptype_failure_df.reset_index(level='response', inplace=True)\n    comptype_failure_df = comptype_failure_df.drop('response', axis=1)\n    comptype_failure_df.to_csv(\n        outfile_comptype_failures, sep=',',\n        index_label=['component_type']\n    )\n\n    np.save(\n        os.path.join(scenario.raw_output_dir, 'sys_frag.npy'),\n        sys_frag\n    )\n\n    np.save(\n        os.path.join(scenario.raw_output_dir, 'pe_sys_econloss.npy'),\n        pe_sys_econloss\n    )\n\n\ndef pe_by_component_class(response_list, infrastructure, scenario):\n    \"\"\"\n    Calculates probability of exceedence based on component classes\n    :param response_list: list of simulation results\n    :param infrastructure: simulated infrastructure\n    :param scenario: values used in simulation\n    :return: None\n    \"\"\"\n    # ------------------------------------------------------------------------\n    # For Probability of Exceedence calculations based on component failures\n    # ------------------------------------------------------------------------\n    #\n    # Damage state boundaries for Component Type Failures (Substations) are\n    # based on HAZUS MH MR3, p 8-66 to 8-68\n    #\n    # ------------------------------------------------------------------------\n\n    cp_classes_in_system = np.unique(list(infrastructure.get_component_class_list()))\n\n    cp_class_map = {k: [] for k in cp_classes_in_system}\n    for comp_id, component in infrastructure.components.items():\n        cp_class_map[component.component_class].append(component)\n\n    # ------------------------------------------------------------------------\n    # For Probability of Exceedence calculations based on component failures:\n    # Damage state boundaries for Component Type Failures (Substations) are\n    # based on HAZUS MH MR3, p 8-66 to 8-68\n    # ------------------------------------------------------------------------\n    if infrastructure.system_class == 'Substation':\n        cp_classes_costed = \\\n            [x for x in cp_classes_in_system\n             if x not in infrastructure.uncosted_classes]\n\n        # --- System fragility - Based on Failure of Component Classes ---\n        comp_class_failures = \\\n            {cc: np.zeros((scenario.num_samples, scenario.num_hazard_pts))\n             for cc in cp_classes_costed}\n\n        comp_class_frag = \\\n            {cc: np.zeros((scenario.num_samples, scenario.num_hazard_pts))\n             for cc in cp_classes_costed}\n\n        for j, hazard_level in enumerate(HazardLevels(scenario)):\n            for i in 
range(scenario.num_samples):\n for compclass in cp_classes_costed:\n for c in cp_class_map[compclass]:\n comp_class_failures[compclass][i, j] += \\\n response_list[hazard_level.hazard_intensity]\\\n [i, infrastructure.components[c]]\n comp_class_failures[compclass][i, j] /= \\\n len(cp_class_map[compclass])\n\n comp_class_frag[compclass][i, j] = \\\n np.sum(comp_class_failures[compclass][i, j] > \\\n infrastructure.ds_lims_compclasses[compclass])\n\n # Probability of Exceedence -- Based on Failure of Component Classes\n pe_sys_cpfailrate = np.zeros(\n (len(infrastructure.sys_dmg_states), scenario.num_hazard_pts)\n )\n for p in range(scenario.num_hazard_pts):\n for d in range(len(infrastructure.sys_dmg_states)):\n ds_ss_ix = []\n for compclass in cp_classes_costed:\n ds_ss_ix.append(\n np.sum(comp_class_frag[compclass][:, p] >= d) /\n float(scenario.num_samples)\n )\n pe_sys_cpfailrate[d, p] = np.median(ds_ss_ix)\n\n # --- Save prob exceedance data as npy ---\n np.save(os.path.join(scenario.raw_output_dir, 'pe_sys_cpfailrate.npy'),\n pe_sys_cpfailrate)\n\n # ------------------------------------------------------------------------\n # Validate damage ratio of the system\n # ------------------------------------------------------------------------\n\n exp_damage_ratio = np.zeros((len(infrastructure.components),\n scenario.num_hazard_pts))\n for l, hazard_level in enumerate(HazardLevels(scenario).hazard_range()):\n # compute expected damage ratio\n for j, component in enumerate(infrastructure.components.values()):\n pb = pe2pb(component.expose_to(hazard_level, scenario)[1:])\n dr = np.array([component.frag_func.damage_states[ds].damage_ratio\n for ds in infrastructure.sys_dmg_states])\n cf = component.cost_fraction\n loss_list = dr * cf\n exp_damage_ratio[j, l] = np.sum(pb * loss_list)\n\n # ------------------------------------------------------------------------\n # Time to Restoration of Full Capacity\n # ------------------------------------------------------------------------\n\n threshold = 0.99\n required_time = []\n output_array_given_recovery = response_list[5]\n for j in range(scenario.num_hazard_pts):\n cpower = np.mean(output_array_given_recovery[:, j, :], axis=0)\\\n / infrastructure.get_nominal_output()\n temp = cpower > threshold\n if sum(temp) > 0:\n required_time.append(np.min(scenario.restoration_time_range[temp]))\n else:\n required_time.append(scenario.restore_time_max)\n\n # ------------------------------------------------------------------------\n # Write analytical outputs to file\n # ------------------------------------------------------------------------\n\n # --- Output File --- summary output ---\n outfile_sys_response = os.path.join(\n scenario.output_path, 'system_response.csv')\n out_cols = ['PGA',\n 'Economic Loss',\n 'Mean Output',\n 'Days to Full Recovery']\n\n # create the arrays\n comp_response_list = response_list[2]\n economic_loss_array = response_list[4]\n calculated_output_array = response_list[3]\n\n outdat = {out_cols[0]: scenario.hazard_intensity_vals,\n out_cols[1]: np.mean(economic_loss_array, axis=0),\n out_cols[2]: np.mean(calculated_output_array, axis=0),\n out_cols[3]: required_time}\n df = pd.DataFrame(outdat)\n df.to_csv(\n outfile_sys_response, sep=',',\n index=False, columns=out_cols\n )\n\n # --- Output File --- response of each COMPONENT to hazard ---\n outfile_comp_resp = os.path.join(scenario.output_path,\n 'component_response.csv')\n component_resp_df = pd.DataFrame(comp_response_list)\n component_resp_df.index.names = ['component_id', 
'response']\n component_resp_df.columns = scenario.hazard_intensity_str\n component_resp_df.to_csv(\n outfile_comp_resp, sep=',',\n index_label=['component_id', 'response']\n )\n\n # --- Output File --- mean loss of component ---\n outfile_comp_loss = os.path.join(scenario.output_path,\n 'component_meanloss.csv')\n component_loss_df = component_resp_df.iloc\\\n [component_resp_df.index.get_level_values(1) == 'loss_mean']\n component_loss_df.reset_index(level='response', inplace=True)\n component_loss_df = component_loss_df.drop('response', axis=1)\n component_loss_df.to_csv(\n outfile_comp_loss, sep=',',\n index_label=['component_id']\n )\n\n # # --- Output File --- DataFrame of mean failures per component CLASS ---\n # outfile_compclass_failures = os.path.join(\n # output_path, 'comp_class_meanfailures.csv')\n # compclass_failure_df.to_csv(outfile_compclass_failures, sep=',',\n # index_label=['component_class'])\n\n # ------------------------------------------------------------------------\n # *** Saving vars ***\n # ------------------------------------------------------------------------\n\n if scenario.save_vars_npy:\n np.save(\n os.path.join(scenario.raw_output_dir, 'economic_loss_array.npy'),\n economic_loss_array\n )\n\n np.save(\n os.path.join(scenario.raw_output_dir, 'calculated_output_array.npy'),\n calculated_output_array\n )\n\n np.save(\n os.path.join(scenario.raw_output_dir,\n 'output_array_given_recovery.npy'),\n output_array_given_recovery\n )\n\n np.save(\n os.path.join(scenario.raw_output_dir, 'exp_damage_ratio.npy'),\n exp_damage_ratio\n )\n\n np.save(\n os.path.join(scenario.raw_output_dir, 'required_time.npy'),\n required_time\n )\n\n # ------------------------------------------------------------------------\n logging.info(\"\\nOutputs saved in: \" +\n Fore.GREEN + scenario.output_path + Fore.RESET + '\\n')\n\n print(\"\\nOutputs saved in:\\n\" +\n Fore.GREEN + scenario.output_path + Fore.RESET + '\\n')\n\n # ... 
END POST-PROCESSING\n    # ****************************************************************************\n\ndef pe2pb(pe):\n    \"\"\"\n    Convert probability of exceedence of damage states to\n    probability of being in each discrete damage state\n    \"\"\"\n    # sorted array: from max to min\n    pex = np.sort(pe)[::-1]\n    tmp = -1.0 * np.diff(pex)\n    pb = np.append(tmp, pex[-1])\n    pb = np.insert(pb, 0, 1 - pex[0])\n    return pb\n\n\ndef main():\n    code_start_time = time.time(); SETUPFILE = sys.argv[1]  # start the run timer\n    run_scenario(SETUPFILE)\n\n    print(Style.BRIGHT + Fore.YELLOW +\n          \"[ Run time: %s ]\\n\" %\n          str(timedelta(seconds=(time.time() - code_start_time))) +\n          Style.RESET_ALL)\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "sifra/infrastructure_response.py", "file_name": "infrastructure_response.py", "file_ext": "py", "file_size_in_byte": 25566, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.use", "line_number": 15, "usage_type": "call"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 36, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 36, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 36, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 36, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 38, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 38, "usage_type": "name"}, {"api_name": "sifraclasses.Scenario", "line_number": 40, "usage_type": "call"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 41, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 41, "usage_type": "name"}, {"api_name": "colorama.Fore.GREEN", "line_number": 41, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 41, "usage_type": "name"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 42, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 42, "usage_type": "name"}, {"api_name": "time.time", "line_number": 43, "usage_type": "call"}, {"api_name": "model_ingest.ingest_spreadsheet", "line_number": 46, "usage_type": "call"}, {"api_name": "sifra.modelling.hazard_levels.HazardLevels", "line_number": 65, "usage_type": "call"}, {"api_name": "time.time", "line_number": 68, "usage_type": "call"}, {"api_name": "parmap.map", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.float", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 101, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.transpose", "line_number": 106, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 108, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 109, "usage_type": "call"}, {"api_name": "numpy.ndarray.flatten", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 136, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 139, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.ndarray.flatten", "line_number": 143, "usage_type": "call"}, {"api_name": "numpy.ndarray", "line_number": 143, "usage_type": "attribute"}, {"api_name": "numpy.ndarray.flatten", "line_number": 144, "usage_type": "call"}, {"api_name": 
"numpy.ndarray", "line_number": 144, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 150, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 150, "usage_type": "name"}, {"api_name": "seaborn.set", "line_number": 151, "usage_type": "call"}, {"api_name": "seaborn.boxplot", "line_number": 153, "usage_type": "call"}, {"api_name": "seaborn.despine", "line_number": 161, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 183, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 183, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.close", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 211, "usage_type": "call"}, {"api_name": "os.path", "line_number": 211, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 215, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 216, "usage_type": "call"}, {"api_name": "os.path", "line_number": 216, "usage_type": "attribute"}, {"api_name": "zipfile.ZIP_DEFLATED", "line_number": 217, "usage_type": "attribute"}, {"api_name": "zipfile.ZipFile", "line_number": 218, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 220, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 226, "usage_type": "call"}, {"api_name": "os.path", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 230, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 237, "usage_type": "call"}, {"api_name": "os.path", "line_number": 237, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "pickle.dump", "line_number": 251, "usage_type": "call"}, {"api_name": "pandas.MultiIndex.from_tuples", "line_number": 274, "usage_type": "call"}, {"api_name": "pandas.MultiIndex", "line_number": 274, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 277, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 295, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 302, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 309, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 316, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 323, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 330, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 335, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 341, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 351, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 354, "usage_type": "call"}, {"api_name": "os.path", "line_number": 354, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 356, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 364, "usage_type": "call"}, {"api_name": "os.path", "line_number": 364, "usage_type": "attribute"}, {"api_name": 
"os.path.join", "line_number": 376, "usage_type": "call"}, {"api_name": "os.path", "line_number": 376, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 387, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 388, "usage_type": "call"}, {"api_name": "os.path", "line_number": 388, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 392, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 393, "usage_type": "call"}, {"api_name": "os.path", "line_number": 393, "usage_type": "attribute"}, {"api_name": "numpy.unique", "line_number": 415, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 433, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 437, "usage_type": "call"}, {"api_name": "sifra.modelling.hazard_levels.HazardLevels", "line_number": 440, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 451, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 463, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 466, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 469, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 469, "usage_type": "call"}, {"api_name": "os.path", "line_number": 469, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 476, "usage_type": "call"}, {"api_name": "sifra.modelling.hazard_levels.HazardLevels", "line_number": 478, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 482, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 486, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 496, "usage_type": "call"}, {"api_name": "numpy.min", "line_number": 500, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 509, "usage_type": "call"}, {"api_name": "os.path", "line_number": 509, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 522, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 523, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 525, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 532, "usage_type": "call"}, {"api_name": "os.path", "line_number": 532, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 534, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 543, "usage_type": "call"}, {"api_name": "os.path", "line_number": 543, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 565, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 566, "usage_type": "call"}, {"api_name": "os.path", "line_number": 566, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 570, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 571, "usage_type": "call"}, {"api_name": "os.path", "line_number": 571, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 575, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 576, "usage_type": "call"}, {"api_name": "os.path", "line_number": 576, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 581, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 582, "usage_type": "call"}, {"api_name": "os.path", "line_number": 582, "usage_type": "attribute"}, {"api_name": "numpy.save", "line_number": 586, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 587, "usage_type": "call"}, 
{"api_name": "os.path", "line_number": 587, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 592, "usage_type": "call"}, {"api_name": "colorama.Fore.GREEN", "line_number": 593, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 593, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 593, "usage_type": "attribute"}, {"api_name": "colorama.Fore.GREEN", "line_number": 596, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 596, "usage_type": "name"}, {"api_name": "colorama.Fore.RESET", "line_number": 596, "usage_type": "attribute"}, {"api_name": "numpy.sort", "line_number": 607, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 608, "usage_type": "call"}, {"api_name": "numpy.append", "line_number": 609, "usage_type": "call"}, {"api_name": "numpy.insert", "line_number": 610, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 615, "usage_type": "attribute"}, {"api_name": "colorama.Style.BRIGHT", "line_number": 618, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 618, "usage_type": "name"}, {"api_name": "colorama.Fore.YELLOW", "line_number": 618, "usage_type": "attribute"}, {"api_name": "colorama.Fore", "line_number": 618, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 620, "usage_type": "call"}, {"api_name": "time.time", "line_number": 620, "usage_type": "call"}, {"api_name": "colorama.Style.RESET_ALL", "line_number": 621, "usage_type": "attribute"}, {"api_name": "colorama.Style", "line_number": 621, "usage_type": "name"}]} +{"seq_id": "302404445", "text": "from django.shortcuts import render, redirect, reverse, get_object_or_404\nfrom django.contrib import messages\nfrom django.conf import settings\nfrom django.core.mail import send_mail\n\nfrom django.contrib.auth.models import User\n\nimport stripe\n\n# Create your views here.\ndef index(request):\n return render(request, 'website/index.html')\n\n\ndef newsletter(request):\n email = request.POST['email-address']\n send_mail('Newsletter Signup', email, email, ['sean@restdos.com'])\n messages.success(request,f'You have successfully signed up to our newsletter with your email: {email}')\n template = request.META['HTTP_REFERER']\n return render(request, 'website/index.html')\n\n\ndef pricing(request):\n signups = User.objects.count()\n secondary_string = 'at a permanently reduced price for the next 10 customers'\n if signups <= 10:\n monthly = 33\n primary_string = \" €33 a month\" \n plan = 'First Advantage'\n elif signups <= 20:\n monthly = 50\n primary_string = ' €50 a month'\n plan = 'Second Advantage'\n elif signups <= 30:\n monthly = 67\n primary_string = ' €67 a month'\n plan = 'Third Advantage'\n elif signups <= 40:\n monthly = 80\n primary_string = ' €80 a month'\n plan = 'Final Advantage'\n else:\n monthly = 100\n primary_string = '100 a month'\n plan = 'Complete Advantage'\n secondary_string = 'including unlimited bookings and guests'\n\n template = 'website/pricing.html'\n context = {\n 'monthly': monthly,\n 'primary_string': primary_string,\n 'secondary_string': secondary_string,\n 'plan': plan,\n }\n return render(request, template, context)\n\n\ndef contact(request):\n if request.method == 'POST':\n email = request.POST['email']\n first_name = request.POST['first-name']\n last_name = request.POST['last-name']\n restaurant = request.POST['company']\n message = request.POST['message']\n message = message + ' ' + first_name + ' ' + last_name + ' ' + restaurant + ' ' + 
email\n send_mail('New Message', message, email, ['sean@restdos.com'])\n messages.success(request,f'You have successfully sent a message to us. We will reply to your email: {email}')\n return render(request, 'website/contact.html')\n\ndef signup_email(request, signup_plan, signup_monthly):\n\n if request.method == 'POST':\n if User.objects.filter(email=request.POST['email']):\n messages.warning(request, 'A user with that email already exists.')\n else:\n return redirect('signup', signup_plan=signup_plan, signup_monthly=signup_monthly)\n\n\n\n context = {\n 'signup_plan': signup_plan,\n 'signup_monthly': signup_monthly,\n }\n return render(request, 'website/signup_email.html', context)\n\ndef signup(request, signup_plan, signup_monthly):\n stripe_public_key = settings.STRIPE_PUBLIC_KEY\n stripe_secret_key = settings.STRIPE_SECRET_KEY\n\n\n stripe_total = round(int(signup_monthly) * 100)\n stripe.api_key = stripe_secret_key\n intent = stripe.PaymentIntent.create(\n amount=stripe_total,\n currency=settings.STRIPE_CURRENCY,\n )\n\n if not stripe_public_key:\n messages.warning(request, 'Stripe public key is missing. \\\n Did you forget to set it in your environment?')\n\n context = {\n 'signup_plan': signup_plan,\n 'signup_monthly': signup_monthly,\n 'stripe_public_key': stripe_public_key,\n 'client_secret': intent.client_secret,\n }\n\n if not signup_plan:\n messages.error(request, 'You have not selected a signup plan')\n return redirect(reverse('pricing'))\n\n if request.method == 'POST':\n return redirect('account_signup')\n \n\n return render(request, 'website/signup.html', context)\n\n\ndef signup_success(request):\n return render(request, 'website/new_user.html') \n\n\ndef terms(request):\n return render(request, 'website/terms_and_conditions.html')\n\n\ndef privacy(request):\n return render(request, 'website/privacy.html')\n\n\ndef blog(request):\n return render(request, 'website/blog.html')\n", "sub_path": "website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 4156, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.shortcuts.render", "line_number": 12, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 17, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 18, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 18, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 20, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.count", "line_number": 24, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 24, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 24, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 67, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 67, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 68, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.filter", "line_number": 73, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 73, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 
73, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 74, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 74, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 76, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 84, "usage_type": "call"}, {"api_name": "django.conf.settings.STRIPE_PUBLIC_KEY", "line_number": 87, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 87, "usage_type": "name"}, {"api_name": "django.conf.settings.STRIPE_SECRET_KEY", "line_number": 88, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 88, "usage_type": "name"}, {"api_name": "stripe.api_key", "line_number": 92, "usage_type": "attribute"}, {"api_name": "stripe.PaymentIntent.create", "line_number": 93, "usage_type": "call"}, {"api_name": "stripe.PaymentIntent", "line_number": 93, "usage_type": "attribute"}, {"api_name": "django.conf.settings.STRIPE_CURRENCY", "line_number": 95, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 95, "usage_type": "name"}, {"api_name": "django.contrib.messages.warning", "line_number": 99, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 99, "usage_type": "name"}, {"api_name": "django.contrib.messages.error", "line_number": 110, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 110, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.reverse", "line_number": 111, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 114, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 117, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 121, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 125, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 129, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "118180503", "text": "#!/usr/bin/env python\nfrom __future__ import division;\nfrom __future__ import print_function;\nfrom __future__ import absolute_import;\nimport sys;\nimport os;\nscriptsDir = os.environ.get(\"UTIL_SCRIPTS_DIR\");\nif (scriptsDir is None):\n raise Exception(\"Please set environment variable UTIL_SCRIPTS_DIR\");\nsys.path.insert(0,scriptsDir);\nsys.path.insert(0,scriptsDir+\"/jsondbPackage\");\nimport jsondb;\nimport pathSetter;\nimport util;\nimport fileProcessing as fp;\nimport abc;\nfrom collections import namedtuple\nimport shutil\n\nrunTrackerEmail = \"bestestFramework@stanford.edu\"\nclass RunAndAddRecords(object):\n def __init__(self, cmdKwargsGenerator, recordFromCmdKwargs, addRecordFunction):\n \"\"\"\n cmdKwargsGenerator: instance of AbstractCmdKwargsGenerator\n recordFromCmdKwargs: instance of AbstractRecordFromCmdKwargs\n addRecordFunction: just a function that adds the records to the db\n \"\"\"\n assert cmdKwargsGenerator is not None;\n assert recordFromCmdKwargs is not None;\n assert addRecordFunction is not None;\n self.cmdKwargsGenerator=cmdKwargsGenerator;\n self.recordFromCmdKwargs=recordFromCmdKwargs;\n self.addRecordFunction=addRecordFunction;\n def runAndAddRecords(self, numTrials=None):\n consecutiveFailedRecordAdds = 0;\n i = 0;\n while (numTrials is None or i < numTrials):\n print(\"Running trial 
\"+str(i));\n kwargs = self.cmdKwargsGenerator(); \n record = self.recordFromCmdKwargs.getRecordFromCmdKwargs(**kwargs); \n if (record is not None):\n consecutiveFailedRecordAdds=0;\n self.addRecordFunction(record);\n else:\n consecutiveFailedRecordAdds += 1;\n print(\"Skipping record add; consecutive failed adds:\",consecutiveFailedRecordAdds)\n if (consecutiveFailedRecordAdds == 5):\n raise RuntimeError(str(consecutiveFailedRecordAdds)+\" consecutive failed record adds. Ending.\");\n i += 1;\n\ndef getAddRecordAndSaveDbFunction(db, dbFile):\n def addRecordFunc(record):\n jsondb.addRecordToDbAndWriteToFile(record, db, dbFile);\n return addRecordFunc;\n \ndef getAddRecordToDbFileFunction(dbFactory, dbFile):\n def addRecordFunc(record):\n jsondb.addRecordToFile(record, dbFactory, dbFile);\n return addRecordFunc;\n \nclass AbstractCmdKwargsGenerator(object):\n __metaclass__ = abc.ABCMeta\n @abc.abstractmethod\n def __call__(self):\n raise NotImplementedError();\n\nclass CmdKwargsFromManager(AbstractCmdKwargsGenerator):\n def __init__(self, managerToCmdKwargs, manager):\n self.managerToCmdKwargs = managerToCmdKwargs;\n self.manager = manager;\n def __call__(self):\n self.manager.prepareNextSet(); \n return self.managerToCmdKwargs(self.manager); \n\nclass AbstractRecordFromCmdKwargs(object):\n \"\"\"\n given kwargs for a command, eg, launching a job,\n returns a record to go in a db\n \"\"\"\n __metaclass__ = abc.ABCMeta\n @abc.abstractmethod\n def getRecordFromCmdKwargs(self, **cmdKwargs):\n raise NotImplementedError();\n \nclass RecordFromCmdKwargsUsingLines(AbstractRecordFromCmdKwargs):\n def __init__(self, options, linesFromCmdKwargs, makeRecordFromLines_producer, logger):\n \"\"\"\n linesFromCmdKwargs: instance of AbstractMakeLinesFromCmdKwargs\n makeRecordFromLines_producer: returns an instance of AbstractMakeRecordFromLines\n logger: instance of AbstractLogger \n \"\"\"\n self.options = options;\n self.linesFromCmdKwargs=linesFromCmdKwargs;\n self.makeRecordFromLines_producer = makeRecordFromLines_producer;\n self.logger = logger;\n def getRecordFromCmdKwargs(self, **cmdKwargs):\n try:\n lines = self.linesFromCmdKwargs.getLines(**cmdKwargs);\n recordMaker = self.makeRecordFromLines_producer();\n self.logger.log(\"Parsing stdout contents of function call...\\n\")\n for line in lines:\n self.logger.log(line);\n self.logger.log(\"\\n\");\n recordMaker.processLine(line);\n if (recordMaker.isRecordReady()):\n return recordMaker.getRecord(**cmdKwargs);\n self.logger.log(\"...Done parsing stdout contents of function call\\n\")\n #if you get here, it means you couldn't make the record \n self.logger.log(\"Error! Unable to make a record! 
Info: \"\n +recordMaker.getInfoOnStatus());\n raise RuntimeError(\"Unable to make record; info:\\n\"\n +recordMaker.getInfoOnStatus()\n +\"\\nlog file: \"+self.logger.getInfo());\n except Exception as e:\n traceback=util.getErrorTraceback();\n emailError(self.options, self.logger.getInfo(), traceback);\n self.logger.log(\"Error!\\n\"+traceback+\"\\n\");\n print(\"caught traceback: \"+traceback);\n \ndef emailError(options, logFileInfo, traceback):\n if (options.emailMode not in [EmailModes.noEmails]):\n util.sendEmails(options.emails, runTrackerEmail\n ,\"Error when running \"+options.jobName\n ,\"Log file: \"+logFileInfo+\"\\n\"+traceback);\n\nclass AbstractMakeLinesFromCmdKwargs(object):\n __metaclass__ = abc.ABCMeta\n @abc.abstractmethod\n def getLines(self, **cmdKwargs):\n \"\"\"\n given some kwargs for a command, eg, launching a job,\n returns a lines iterator\n \"\"\"\n raise NotImplementedError();\n\nclass AbstractMakeRecordFromLines(object):\n \"\"\"\n pass it a series of output lines\n to make a record\n \"\"\"\n __metaclass__ = abc.ABCMeta\n @abc.abstractmethod\n def processLine(self, line):\n raise NotImplementedError(); \n @abc.abstractmethod\n def isRecordReady(self):\n raise NotImplementedError();\n @abc.abstractmethod\n def getInfoOnStatus(self):\n \"\"\"\n Return any info that may be useful\n for debugging why the record was\n not created\n \"\"\"\n raise NotImplementedError();\n @abc.abstractmethod\n def getRecord(self, **commandKwargs):\n raise NotImplementedError();\n\nclass Abstract_MakeKwargsFromLines(object):\n \"\"\"\n pass it a series of output lines to\n make kwargs; used by record makers\n \"\"\"\n __metaclass__ = abc.ABCMeta \n @abc.abstractmethod\n def processLine(self, line):\n raise NotImplementedError();\n @abc.abstractmethod\n def areKwargsReady(self):\n raise NotImplementedError();\n @abc.abstractmethod\n def getInfoOnStatus(self):\n raise NotImplementedError();\n @abc.abstractmethod\n def getKwargs(self):\n raise NotImplementedError();\n\nclass SubKwargsMakersHandler(object):\n def __init__(self, kwargsMakers):\n self.kwargsMakers = kwargsMakers;\n def processLine(self, line):\n for kwargsMaker in self.kwargsMakers:\n if (not kwargsMaker.areKwargsReady()):\n kwargsMaker.processLine(line);\n def isReady(self):\n return all([kwargsMaker.areKwargsReady() for kwargsMaker in self.kwargsMakers]); \n def getInfoOnStatus(self):\n return \"\\n\".join([x.getInfoOnStatus() for x in self.kwargsMakers]);\n def getKwargs(self):\n kwargs = {};\n for kwargsMaker in self.kwargsMakers:\n kwargs.update(kwargsMaker.getKwargs());\n return kwargs;\n\nclass MakeRecordFrom_MakeKwargsFromLines(AbstractMakeRecordFromLines):\n def __init__(self, kwargsMakers, recordMakerFunc):\n \"\"\"\n in order to use: must define a recordMakerFunc\n that uses the commandKwargs, and also a\n series of kwargsMakers for all the kwargs\n that are parsed from the stream.\n \"\"\"\n assert kwargsMakers is not None;\n assert recordMakerFunc is not None;\n self.subKwargsMakersHandler = SubKwargsMakersHandler(kwargsMakers);\n self.recordMakerFunc = recordMakerFunc;\n def processLine(self, line):\n self.subKwargsMakersHandler.processLine(line);\n def isRecordReady(self):\n return self.subKwargsMakersHandler.isReady();\n def getInfoOnStatus(self):\n return self.subKwargsMakersHandler.getInfoOnStatus();\n def getRecord(self, **commandKwargs):\n kwargs = self.subKwargsMakersHandler.getKwargs();\n return self.recordMakerFunc(kwargs, commandKwargs);\n\ndef get_makeRecordFromLines_producer(recordMakerFunc, 
kwargsMakers_producer):\n    \"\"\"\n    returns a function that produces a MakeRecordFrom_MakeKwargsFromLines instance.\n    Uses kwargsMakers_producer to instantiate fresh kwargsMakers every time.\n    \"\"\"\n    assert recordMakerFunc is not None;\n    assert kwargsMakers_producer is not None;\n    return lambda: MakeRecordFrom_MakeKwargsFromLines(\n                    kwargsMakers=kwargsMakers_producer()\n                    ,recordMakerFunc=recordMakerFunc); \n\nclass SimpleRegex_MakeKwargsFromLines(Abstract_MakeKwargsFromLines):\n    def __init__(self, kwargName, kwargTypeCast, regex, groupIndex=1, startLookingRegex=None):\n        \"\"\"\n        startLookingRegex: only try to match regex AFTER you have seen startLookingRegex\n        \"\"\"\n        import re;\n        self.kwargName = kwargName\n        self.kwargTypeCast = kwargTypeCast\n        self.pattern = re.compile(regex)\n        self.groupIndex = groupIndex;\n        self.ready = False;\n        self.val = None;\n        self.startLookingPattern = None if startLookingRegex is None else re.compile(startLookingRegex);\n        self.startLooking = startLookingRegex is None;\n    def processLine(self, line):\n        if (self.startLooking): \n            match = self.pattern.search(line);\n            if match is not None:\n                self.val = self.kwargTypeCast(match.group(self.groupIndex)); \n                self.ready = True;\n        else:\n            assert self.startLookingPattern is not None;\n            match = self.startLookingPattern.search(line);\n            if (match is not None):\n                self.startLooking = True;\n    def areKwargsReady(self):\n        return self.ready;\n    def getInfoOnStatus(self):\n        return self.kwargName+\" is\"+(\" not\" if (not self.ready) else \"\")+\" ready\";\n    def getKwargs(self):\n        assert self.val is not None;\n        assert self.areKwargsReady();\n        return {self.kwargName: self.val}; \n\n#slightly inappropriate subclassing but I'm tired...\nclass MultilineRegex_MakeKwargsFromLines(SimpleRegex_MakeKwargsFromLines):\n    def __init__(self, kwargName, kwargTypeCast, regex, groupIndex=1, startLookingRegex=None):\n        \"\"\"\n        startLookingRegex: only try to match regex AFTER you have seen startLookingRegex\n        \"\"\"\n        self.concatenatedLinesAfterStartLooking = \"\";\n        super(MultilineRegex_MakeKwargsFromLines, self).__init__(\n            kwargName=kwargName\n            , kwargTypeCast=kwargTypeCast\n            , regex=regex\n            , groupIndex=groupIndex\n            , startLookingRegex=startLookingRegex);\n    def processLine(self, line):\n        if (self.startLooking): \n            self.concatenatedLinesAfterStartLooking += line+\"\\n\";\n            match = self.pattern.search(self.concatenatedLinesAfterStartLooking);\n            if match is not None:\n                self.val = self.kwargTypeCast(match.group(self.groupIndex)); \n                self.ready = True;\n        else:\n            assert self.startLookingPattern is not None;\n            match = self.startLookingPattern.search(line);\n            if (match is not None):\n                self.startLooking = True;\n\n\nclass SubKwargsWrapper(Abstract_MakeKwargsFromLines):\n    def __init__(self, kwargName, subKwargsMakers):\n        self.kwargName = kwargName;\n        self.subKwargsMakersHandler = SubKwargsMakersHandler(subKwargsMakers);\n    def processLine(self, line):\n        self.subKwargsMakersHandler.processLine(line);\n    def areKwargsReady(self):\n        return self.subKwargsMakersHandler.isReady()\n    def getInfoOnStatus(self):\n        return (\"For \"+self.kwargName+\"...\\n\"\n                +self.subKwargsMakersHandler.getInfoOnStatus()\n                +\"\\n...end\"+self.kwargName);\n    def getKwargs(self, **commandKwargs):\n        subKwargs = self.subKwargsMakersHandler.getKwargs();\n        return {self.kwargName: subKwargs};\n\nclass AbstractLogger(object):\n    __metaclass__ = abc.ABCMeta\n    @abc.abstractmethod\n    def log(self):\n        raise NotImplementedError();\n    @abc.abstractmethod\n    def getInfo(self):\n        raise NotImplementedError();\n    
@abc.abstractmethod\n def close(self):\n raise NotImplementedError();\n\nclass FileLogger(AbstractLogger):\n def __init__(self, logFileName):\n self.logFileName = logFileName;\n self.logFileHandle = fp.getFileHandle(logFileName, 'a');\n def log(self, toWrite):\n self.logFileHandle.write(toWrite);\n def getInfo(self):\n return self.logFileName;\n def close(self):\n self.logFileHandle.close();\n \nclass LinesFromFunctionStdout_NoProcessSpawned(AbstractMakeLinesFromCmdKwargs):\n def __init__(self, func, logger=None, emailErrorFunc=None):\n self.logger=logger;\n self.emailErrorFunc = emailErrorFunc;\n self.func = util.redirectStdoutToString(func, logger=logger, emailErrorFunc=emailErrorFunc); \n def getLines(self, **cmdKwargs):\n lines = self.func(**cmdKwargs)\n return lines.split(\"\\n\")\n\n#TODO: test\nclass LinesFromSpawnedProcess(AbstractMakeLinesFromCmdKwargs):\n def getLines(self, **cmdKwargs):\n \"\"\"\n cmdKwargs should have 'args', and I can't think\n of anything else it should have.\n \"\"\"\n assert 'args' in cmdKwargs;\n assert len(cmdKwargs.keys())==1;\n import subprocess;\n popen = subprocess.Popen(args=cmdKwargs['args'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n lines = iter(popen.stdout.readline, b\"\") \n return lines;\n\n#CL = command line\ndef formatArgForCL(argName, argVal):\n return \"--\"+argName+\" \"+str(argVal);\n\ndef getBestUpdateFunc(isLargerBetter, metadataCallbacks):\n \"\"\"\n updateFunc to be provided to jsondb.MetadataUpdateInfo;\n updates a metadata field to keep track of the best.\n \"\"\"\n def updateFunc(newVal, originalVal, metadataAttrName, record=None):\n update=False;\n if originalVal is None:\n update = True;\n else:\n if isLargerBetter:\n if (newVal > originalVal):\n update = True;\n else:\n if (newVal < originalVal):\n update = True; \n for metadataCallback in metadataCallbacks:\n metadataCallback(update, newVal\n , originalVal, metadataAttrName, record);\n if (update):\n return newVal;\n else:\n return originalVal;\n return updateFunc;\n\ndef getPrintAddedRecordCallback():\n def callback(record, db):\n print(\"--Record added:--\") \n print(util.formattedJsonDump(record.getJsonableObject()));\n print(\"-----------------\") \n return callback;\ndef getEmailRecordAddedCallback(emailOptions):\n def callback(record, db):\n subject = \"Record added for \"+emailOptions.jobName\n contents = util.formattedJsonDump(record.getJsonableObject());\n util.sendEmails(emailOptions.toEmails, emailOptions.fromEmail, subject, contents);\n return callback; \ndef getContents(valName, newVal, originalVal, record):\n contents = (\"New best: \"+valName+\": \"+str(newVal)+\"\\n\" \n +\"Previous best \"+valName+\": \"+str(originalVal)+\"\\n\"\n +\"Record:\\n\"+util.formattedJsonDump(record.getJsonableObject()))\n return contents; \ndef getPrintIfNewBestCallback():\n def callback(update, newVal, originalVal, valName, record):\n if (update):\n contents = getContents(valName, newVal, originalVal, record);\n print(\"-------New best!-------\")\n print(contents); \n print(\"-----------------------\");\n return callback;\n \nEmailOptions = namedtuple(\"EmailOptions\", [\"toEmails\", \"fromEmail\", \"jobName\", \"emailMode\"]);\ndef getEmailIfNewBestCallback(emailOptions, perfToTrackOptions):\n \"\"\"\n a callback to send an email when a new 'best' is attained.\n \"\"\"\n def emailCallback(update, newVal, originalVal, valName, record):\n if (update):\n if (perfToTrackOptions.thresholdPerfToEmailAt is None or\n 
util.isBetterOrEqual(newVal,perfToTrackOptions.thresholdPerfToEmailAt\n ,perfToTrackOptions.isLargerBetter)):\n subject = \"New best \"+valName+\" for \"+emailOptions.jobName\n contents = getContents(valName, newVal, originalVal, record);\n util.sendEmails(emailOptions.toEmails, emailOptions.fromEmail, subject, contents);\n return emailCallback;\n\ndef renameFilesWithRecordNumberCallback(savedFilesTracker):\n def callback(record, jsonDb):\n prospectiveRecordNumber=jsonDb.getNumRecords();\n #rename all the files\n for (idx, aFile) in enumerate(savedFilesTracker.currentFiles):\n newName = fp.getFileNameParts(aFile).getFilePathWithTransformation(\n lambda x: \"record_\"+str(prospectiveRecordNumber)+\"_\"+x) \n os.rename(aFile, newName); \n savedFilesTracker.currentFiles[idx] = newName;\n return callback;\n\ndef getSaveBestFilesCallback(perfToTrackOptions, savedFilesTracker):\n def callback(record, jsonDb):\n perfField = perfToTrackOptions.perfAttrName;\n recordPerf = record.getField(perfField);\n bestPerfSoFar = jsonDb.metadata.getField(perfField);\n if (util.isBetter(recordPerf, bestPerfSoFar, perfToTrackOptions.isLargerBetter)):\n newBestFiles=[];\n for currentFile in savedFilesTracker.currentFiles:\n bestFileName = savedFilesTracker.bestFileNameGivenCurrentFile(currentFile);\n print(\"Saving new best:\",currentFile,\"as\",bestFileName) \n shutil.copy(currentFile, bestFileName); \n newBestFiles.append(bestFileName);\n oldBestPerfSavedFiles = jsonDb.metadata.getField(RunTrackerMetadataFields.bestPerfSavedFiles); \n if (oldBestPerfSavedFiles is not None):\n for oldBestFile in oldBestPerfSavedFiles:\n print(\"Removing old best:\",oldBestFile);\n if (os.path.exists(oldBestFile)):\n os.remove(oldBestFile);\n else:\n print(\"WARNING: No such file: \",oldBestFile)\n jsonDb.metadata.setField(RunTrackerMetadataFields.bestPerfSavedFiles, newBestFiles)\n return callback;\n\ndef getSaveSomeFilesCallback(perfToTrackOptions, savedFilesTracker):\n def callback(record, jsonDb):\n #I think the db should be locked during all this so no issue\n #with the record number changing, I hope\n prospectiveRecordNumber = jsonDb.getNumRecords();\n \n #by default, do not save any files\n saveFiles = False; \n #if at least one of the save file constrains is active, these\n #files may qualify for saving. 
Check to say if either are\n #satisfied.\n if (perfToTrackOptions.minThresholdPerfToSaveFiles is not None or\n savedFilesTracker.topNtoSave is not None):\n perf = record.getField(perfToTrackOptions.perfAttrName)\n #if the \"minPerfThreshold\" constraint is met (or it's not active)...\n if (perfToTrackOptions.minThresholdPerfToSaveFiles is None or\n util.isBetterOrEqual(perf, perfToTrackOptions.minThresholdPerfToSaveFiles, perfToTrackOptions.isLargerBetter)):\n if (perfToTrackOptions.minThresholdPerfToSaveFiles is not None):\n print(\"Min threshold constraint satisfied for \"+str(prospectiveRecordNumber)+\"; \"\n +str(perf)+\" vs \"+str(perfToTrackOptions.minThresholdPerfToSaveFiles));\n #if the topN constraint is not active, that means the \n #minThreshold constraint was active and satisfied...therefore,\n #save.\n if (savedFilesTracker.topNtoSave is None):\n saveFiles=True;\n else: #otherwise, ensure the topN constraint is satisfied.\n numRecords = jsonDb.jsonableRecordsHolder.getNumRecords();\n #if there aren't even N records in the db, this will of course be one\n #of the top N.\n if (numRecords < savedFilesTracker.topNtoSave):\n print(\"Top \"+str(savedFilesTracker.topNtoSave)+\" constraint\"\n +\" satisfied as there are only \"+str(numRecords)+\" in the db\")\n saveFiles = True; \n else:\n #otherwise, get the nth record and compare perf with it\n nthRecord = (jsonDb.jsonableRecordsHolder\n .getIthSortedRecord(savedFilesTracker.topNtoSave-1))\n nthRecordPerf = nthRecord.getField(perfToTrackOptions.perfAttrName); \n #if better than nth record, delete files of nth record and evict from dict\n if (util.isBetter(perf, nthRecordPerf, perfToTrackOptions.isLargerBetter)):\n print(\"Top \"+str(savedFilesTracker.topNtoSave)+\" constraint satisfied; \"\n +\"Nth record (record no. 
\"+str(nthRecord.getRecordNo())\n +\" had perf \"+str(nthRecordPerf)+\" and this one (\"+str(prospectiveRecordNumber)\n +\") had perf \"+str(perf))\n saveFiles=True;\n filesToEvict = nthRecord.getField(RunTrackerRecordFields.savedFiles, noneIfAbsent=True); \n if (filesToEvict is None):\n print(\"\\n***\\nWARNING: No files to evict found for ousted Nth record \"\n +str(nthRecord.getRecordNo())+\"\\n***\");\n else:\n for aFile in filesToEvict:\n print(\"Removing:\",aFile);\n if os.path.exists(aFile)==False:\n print(\"\\n***\\nWARNING: I'm supposed to delete \"\n +str(aFile)+\" but it does not exist\\n***\");\n else: \n os.remove(aFile); \n nthRecord.removeField(RunTrackerRecordFields.savedFiles);\n #if have decided not to save these files, delete them.\n if (not saveFiles):\n for aFile in savedFilesTracker.currentFiles:\n print(\"Removing:\",aFile)\n os.remove(aFile);\n else:\n record.setField(RunTrackerRecordFields.savedFiles, savedFilesTracker.currentFiles);\n return callback;\n\nPerfToTrackOptions = namedtuple(\"PerfToTrackOptions\", [\"perfAttrName\", \"isLargerBetter\", \"thresholdPerfToEmailAt\", \"minThresholdPerfToSaveFiles\"]);\nclass SavedFilesTracker(object):\n def __init__(self, bestFileNameGivenCurrentFile, currentFiles, topNtoSave):\n self.bestFileNameGivenCurrentFile = bestFileNameGivenCurrentFile;\n self.currentFiles = currentFiles;\n self.topNtoSave = topNtoSave;\n\ndef getJsonDbFactory(emailOptions, perfToTrackOptions, JsonableRecordClass, savedFilesTracker):\n \"\"\"\n Returns a json db factory that keeps track of the best of some\n attribute and also maintains records in sorted order\n of that attribute.\n savedFilesTracker is an instance of SavedFilesTracker; keeps\n track of the old best model files and the current model\n files. 
Will clear out old files if current files are\n the new best, otherwise will clear out the current\n files unless minThresholdPerfToSaveFiles is not None\n \"\"\"\n keyFunc = lambda x: ((-1 if perfToTrackOptions.isLargerBetter else 1)*getattr(x,perfToTrackOptions.perfAttrName))\n JsonableRecordsHolderClass = jsondb.getSortedJsonableRecordsHolderClass(keyFunc=keyFunc); \n #the metadata callbacks are: print if there's a new best, and also save\n #the best performing model.\n metadataCallbacks = [getPrintIfNewBestCallback()];\n if emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails, EmailModes.errorsAndNewBest]:\n metadataCallbacks.append(getEmailIfNewBestCallback(emailOptions, perfToTrackOptions));\n callbacks_beforeAdd = [ renameFilesWithRecordNumberCallback(savedFilesTracker)\n , getSaveBestFilesCallback(perfToTrackOptions, savedFilesTracker)\n , getSaveSomeFilesCallback(perfToTrackOptions, savedFilesTracker)];\n callbacks_afterAdd = [getPrintAddedRecordCallback()]\n if (emailOptions is not None and emailOptions.emailMode in [EmailModes.allEmails]):\n callbacks_afterAdd.append(getEmailRecordAddedCallback(emailOptions)); \n\n MetadataClass = jsondb.getUpdateValsMetadataClass(\n [jsondb.MetadataUpdateInfo(\n metadataAttrName=perfToTrackOptions.perfAttrName\n ,recordAttrName=perfToTrackOptions.perfAttrName\n ,updateFunc=getBestUpdateFunc(\n isLargerBetter=perfToTrackOptions.isLargerBetter\n ,metadataCallbacks=metadataCallbacks)\n ,initVal=None)\n ,jsondb.NumRecordsMetadataUpdateInfo]\n ,[RunTrackerMetadataFields.bestPerfSavedFiles]); \n jsonDbFactory = jsondb.JsonDb.getFactory(JsonableRecordClass=JsonableRecordClass\n ,JsonableRecordsHolderClass=JsonableRecordsHolderClass\n ,MetadataClass=MetadataClass\n ,callbacks_beforeAdd=callbacks_beforeAdd\n ,callbacks_afterAdd=callbacks_afterAdd); \n return jsonDbFactory; \n\nRunTrackerRecordFields = util.enum(savedFiles=\"savedFiles\");\nRunTrackerMetadataFields = util.enum(bestPerfSavedFiles=\"bestPerfSavedFiles\");\n\nEmailModes = util.enum(noEmails=\"noEmails\", onlyErrorEmails=\"onlyErrorEmails\", errorsAndNewBest=\"errorsAndNewBest\", allEmails=\"allEmails\"); \ndef addRunTrackerArgumentsToParser(parser):\n parser.add_argument(\"--emails\", nargs=\"+\", required=True, help=\"Provide a dummy val if don't want emails\");\n parser.add_argument(\"--emailMode\", choices=EmailModes.vals, default=EmailModes.errorsAndNewBest);\n parser.add_argument(\"--jobName\", help=\"Used to create email subjects and log files\");\n parser.add_argument(\"--logFile\");\n parser.add_argument(\"--jsonDbFile\", required=True, help=\"Used to save the records\");\n parser.add_argument(\"--thresholdPerfToEmailAt\", type=float, help=\"New Best emails only sent above this threshold\")\n parser.add_argument(\"--minThresholdPerfToSaveFiles\", type=float, help=\"Only files above this threshold are saved\");\n parser.add_argument(\"--topNtoSave\", default=100, type=int, help=\"Keep top N performing models\");\n\ndef runTrackerArguments_fillDefaults(options):\n coreJsonDb = fp.getCoreFileName(options.jsonDbFile); \n if (options.logFile is None):\n options.logFile = fp.getFileNameParts(options.jsonDbFile)\\\n .getFilePathWithTransformation(lambda x: \"log_\"+x, extension=\".txt\");\n if (options.jobName is None):\n options.jobName = coreJsonDb; \n\n", "sub_path": "runTrackerPackage/runTracker.py", "file_name": "runTracker.py", "file_ext": "py", "file_size_in_byte": 27950, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": 
"code-starcoder2", "pt": "62", "api": [{"api_name": "os.environ.get", "line_number": 7, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 10, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "sys.path.insert", "line_number": 11, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 11, "usage_type": "attribute"}, {"api_name": "jsondb.addRecordToDbAndWriteToFile", "line_number": 53, "usage_type": "call"}, {"api_name": "jsondb.addRecordToFile", "line_number": 58, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 62, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 63, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 80, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 81, "usage_type": "attribute"}, {"api_name": "util.getErrorTraceback", "line_number": 115, "usage_type": "call"}, {"api_name": "util.sendEmails", "line_number": 122, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 127, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 128, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 141, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 142, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 145, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 148, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 156, "usage_type": "attribute"}, {"api_name": "abc.ABCMeta", "line_number": 165, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 166, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 169, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 172, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 175, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 237, "usage_type": "call"}, {"api_name": "re.compile", "line_number": 241, "usage_type": "call"}, {"api_name": "abc.ABCMeta", "line_number": 307, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 308, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 311, "usage_type": "attribute"}, {"api_name": "abc.abstractmethod", "line_number": 314, "usage_type": "attribute"}, {"api_name": "fileProcessing.getFileHandle", "line_number": 321, "usage_type": "call"}, {"api_name": "util.redirectStdoutToString", "line_number": 333, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 348, "usage_type": "call"}, {"api_name": "subprocess.PIPE", "line_number": 348, "usage_type": "attribute"}, {"api_name": "subprocess.STDOUT", "line_number": 348, "usage_type": "attribute"}, {"api_name": "util.formattedJsonDump", "line_number": 384, "usage_type": "call"}, {"api_name": "util.formattedJsonDump", "line_number": 390, "usage_type": "call"}, {"api_name": "util.sendEmails", "line_number": 391, "usage_type": "call"}, {"api_name": "util.formattedJsonDump", "line_number": 396, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 407, "usage_type": "call"}, {"api_name": "util.isBetterOrEqual", "line_number": 415, "usage_type": "call"}, {"api_name": "util.sendEmails", "line_number": 419, "usage_type": "call"}, {"api_name": 
"fileProcessing.getFileNameParts", "line_number": 427, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 429, "usage_type": "call"}, {"api_name": "util.isBetter", "line_number": 438, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 443, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 449, "usage_type": "call"}, {"api_name": "os.path", "line_number": 449, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 450, "usage_type": "call"}, {"api_name": "util.isBetterOrEqual", "line_number": 472, "usage_type": "call"}, {"api_name": "util.isBetter", "line_number": 495, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 508, "usage_type": "call"}, {"api_name": "os.path", "line_number": 508, "usage_type": "attribute"}, {"api_name": "os.remove", "line_number": 512, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 518, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 523, "usage_type": "call"}, {"api_name": "jsondb.getSortedJsonableRecordsHolderClass", "line_number": 542, "usage_type": "call"}, {"api_name": "jsondb.getUpdateValsMetadataClass", "line_number": 555, "usage_type": "call"}, {"api_name": "jsondb.MetadataUpdateInfo", "line_number": 556, "usage_type": "call"}, {"api_name": "jsondb.NumRecordsMetadataUpdateInfo", "line_number": 563, "usage_type": "attribute"}, {"api_name": "jsondb.JsonDb.getFactory", "line_number": 565, "usage_type": "call"}, {"api_name": "jsondb.JsonDb", "line_number": 565, "usage_type": "attribute"}, {"api_name": "util.enum", "line_number": 572, "usage_type": "call"}, {"api_name": "util.enum", "line_number": 573, "usage_type": "call"}, {"api_name": "util.enum", "line_number": 575, "usage_type": "call"}, {"api_name": "fileProcessing.getCoreFileName", "line_number": 587, "usage_type": "call"}, {"api_name": "fileProcessing.getFileNameParts", "line_number": 589, "usage_type": "call"}]} +{"seq_id": "142937418", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 26 14:41:52 2018\n\n@author: bianl\n\n修正:\n1.将所有的输入项作为函数单独列出来,方便调试;\n2.在选择课程项目的时候可以选择多个课程项目;\n3.新教师x月产能,x默认值为用户第一次所选时间范围;\n4.修复bug & 优化\n\"\"\"\nimport datetime\nimport xlrd\nimport math\nimport numpy as np\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\nfrom openpyxl.styles import Font\nfrom openpyxl.styles.colors import RED, BLUE\n\n\n# Set global variables\nSCHEDULE_FILENMAE = \"配课表明细16.6.1-18.5.31.xlsx\"\nENTRYDATE_FILENAME = \"国外部教师名单6.15.xlsx\"\nGAP_FILENAME = \"离职&管理层老师名单.xlsx\"\n \nCLASSTYPE = {\"英联邦VIP\":[\"雅思\",\"IELTS\"],\"雅思班级\":[\"雅思\",\"IELTS\"],\n \"托福VIP\":[\"TOEFL\"],\"托福班级\":[\"TOEFL\"],\n \"美本\":[\"SSAT\",\"SAT\",\"ACT\",\"AP\",\"北美本科精英\"],\"美研\":[\"GRE\",\"GMAT\"],\n \"其他\":[\"企业培训班\",\"英联邦中学\",\"国际英语实验班\",\"TOEIC\",\"海外留学菁英\"]} \n\n\n\ndef LoadExcelTable( filename ):\n \"\"\"\n 载入配课表等excel文件\n filename为excel文件名\n return excel表头table\n \"\"\"\n data = xlrd.open_workbook( filename ) \n table = data.sheets()[0]\n return table\n\n\ndef LoadTeachersGap( filename ):\n \"\"\"\n 载入离职&管理层老师名单, excel文件\n filename为文件名\n return 教师姓名list\n \"\"\"\n # 打开excel\n data = xlrd.open_workbook( filename ) \n table = data.sheets()[0]\n return [table.row_values(i)[0] for i in range(table.nrows)]\n\n\ndef InputDateRange():\n \"\"\"\n Function: 获取用户输入的日期范围(str),直接回车则返回Default值None。\n Input: 无\n Output: datetime格式的日期范围,或Default值None\n \"\"\"\n while True: \n st = input(\"Please enter start date (e.g. 2015-12-21): \")\n ft = input(\"Please enter end date (e.g. 
2015-12-21): \")\n # Default case\n if len(st) < 1:\n stmp = None\n break\n else:\n try:\n stmp = datetime.datetime.strptime(st, \"%Y-%m-%d\") # str转datetime\n break\n except:\n print(\"Please re-enter start date in this 2015-12-21 format. \")\n continue\n while True:\n # Default case\n if len(ft) < 1:\n etmp = None\n break\n else:\n try:\n etmp = datetime.datetime.strptime(ft, \"%Y-%m-%d\")\n break\n except:\n print(\"Please re-enter start date in this 2015-12-21 format. \")\n continue\n return stmp, etmp\n \n\ndef GetDateRange( stmp, etmp, table ):\n \"\"\"\n Function:根据用户输入的时间范围,确定table中的index范围\n \n Input:\n stmp, etmp: 用户输入的时间范围(datetime)\n table:载入的配课表数据\n \n return 起始与截止日期(datetime),及其index, 该时间范围相当于多少个月\n \"\"\"\n rows = table.nrows\n # 转换excel日期到datetime格式,并找到table中的最小和最大日期\n mindate = xlrd.xldate_as_datetime(table.row_values(1) [3],0) \n maxdate = xlrd.xldate_as_datetime(table.row_values(rows - 1) [3],0)\n # Default value, 从第1行开始(第0行为header)\n if stmp is None or stmp <= mindate:\n start = 1\n p = mindate\n else:\n for i in range(1,rows - 1):\n curdate = xlrd.xldate_as_datetime(table.row_values(i) [3],0)\n if curdate == stmp:\n start = i\n p = curdate\n break \n # Default value, 在最后一行结束(Python下标从0开始,故最后一行index比excel小1)\n if etmp is None or etmp >= maxdate:\n end = rows - 1\n q = maxdate\n else:\n for i in range(max(2,start),rows - 1):# i至少从2开始\n curdate = xlrd.xldate_as_datetime(table.row_values(i) [3],0)\n predate = xlrd.xldate_as_datetime(table.row_values(i-1) [3],0)\n if predate <= etmp and curdate > etmp:\n end = i-1\n q = predate\n break\n day = (q-p).days\n month_rate = day / 30\n return p, q, start, end, round(month_rate,2)\n \n\ndef GetConstraint():\n \"\"\"\n 没有input参数,通过屏幕获得用户输入参数\n 返回Department Constraint 和 Class Type Constraint (str 或 None)\n \"\"\" \n # 输入要查询的部门\n # Default为全部部门\n DeptType = [\"北美项目部\", \"英联邦项目部\"]\n while True:\n Dep = input(\"Please input department: '北美项目部', '英联邦项目部' or both(press Enter directly) \") \n if Dep in DeptType or len(Dep) < 1:\n break\n else:\n print(\"Please re-enter department name\")\n \n # 输入要查询的课程项目\n # 提示所有的课程类型\n # Default为全部类型 \n while True:\n print(\"共有7种课程项目,分别为:\") \n print([keys for keys in CLASSTYPE.keys()],end = \" \") \n # Class Type constraints\n cyconsts = input(\"Please input class types separated by spaces: (press Enter directly = select all the type) \") \n if len(cyconsts) < 1:\n break\n flag = True\n cyconsts = cyconsts.split(\" \")\n for i in range(len(cyconsts)):\n if cyconsts[i] not in CLASSTYPE.keys():\n print(cyconsts[i] + \" is not a legal class type, Please re-enter class type name\")\n flag = False\n break\n # 如果cyconsts是legal的,则终止循环\n if flag :\n break \n return Dep, cyconsts\n\n\ndef isinClassType( Classprogram, CurrentClass, Capacity):\n \"\"\"\n Function: 检查当前课程是否满足课程项目约束\n \n Input\n Classprogram: 课程项目约束(str)\n CurrentClass: 当前课程名(str)\n Capacity: 班级容量(str)\n \n Output\n True if 当前课程CurrentClass 属于 课程项目Classprogram; False otherwise\n \"\"\"\n flag = False \n for i in range(len(Classprogram)):\n if \"VIP\" in Classprogram[i]: # VIP课一定是1对1 或 6人\n if Capacity != \"1对1\" and Capacity != \"6人\":\n continue\n if \"班级\" in Classprogram: # 班级项目一定不是 1对1 或 6人的\n if Capacity == \"1对1\" or Capacity == \"6人\":\n continue \n \n for j in range(len(CLASSTYPE[Classprogram[i]])):\n if CLASSTYPE[Classprogram[i]][j] in CurrentClass:\n flag = True\n return flag\n\n\ndef CheckConstraint(curdep, curcls, capacity, selfstudy, DepConstraint, CTConstraint):\n \"\"\"\n Function: 分别检查“取消约束”,“自习约束”,“部门约束”,“课程约束”\n \n Input\n 
curdep: 当前部门\n curcls: 当前课程\n capacity: 班级容量\n selfstudy: 自习约束\n DepConstraint: 部门约束\n CTConstraint: 课程约束\n \n Output\n 全部符合条件则return True, 否则return False\n \"\"\"\n # 排除已取消的课程\n if \"取消\" in curcls:\n return False \n # 排除自习课、辅导课、模考等\n if len(selfstudy) == 0:\n return False \n # Department Constraint\n if len(DepConstraint) >= 1: # 没有使用default的情况\n if DepConstraint not in curdep:\n return False\n # Class Type Constraint\n if len(CTConstraint) >=1: # 没有使用default的情况\n return isinClassType(CTConstraint, curcls, capacity)\n # 部门 & 课程约束 选择Default的情况\n return True\n \n\ndef TableFilter( table, parameters ):\n \"\"\"\n Function:梳理配课表table\n \n Input\n table: 载入的excel table\n parameter: [start_index, end_index, Department_Constraint, ClassType_Constraint]\n \n Output\n 基于resclstab(matrix),生成简化 & 统计后的配课表:ClassesTable_detail.txt(显示计算细节)\n classes: {课号:{教师姓名:[完成课时,完成课时比例,完成业绩]}} (dict)\n totalclsn: 总开班数目\n totaltn: 总参与教师数目\n \"\"\"\n start = parameters[0]\n end = parameters[1]\n Dep = parameters[2]\n cyconsts = parameters[3]\n \n resclstab = [] # Result Class Table \n classes = {} # 用于计算的classes dictionary\n for i in range(start, end):\n # 逐行读取数据\n r = table.row_values(i)\n curdep = r[0] # 当前部门\n curcls = r[2] # 当前课程种类 \n clsnum = r[1] # 当前课号\n cur_t = r[5] # 当前教师 \n # Check该行数据是否满足约束\n flag = CheckConstraint(curdep, curcls, r[7], cur_t, Dep, cyconsts) \n if not flag:\n continue \n if clsnum not in classes.keys():\n # 当前课号第一次出现\n t4tc = {cur_t:1} # teacher for this class\n # 更新classes dictionary\n classes[clsnum] = t4tc \n # 更新Result Class Table \n tmp = [curdep,curcls,clsnum,r[4],r[10],r[7],r[11]]\n resclstab.append(tmp)\n else:\n # 当前课号已存在于classes中\n if cur_t in classes[clsnum].keys(): # 当前教师已存在于该课号中\n classes[clsnum][cur_t] += 1\n else: # 当前教师不存在于该课号中\n classes[clsnum][cur_t] = 1 \n # 修改classes和resclstab \n totalclsn = len(resclstab) # Total classes number \n teacherlist = {} # 参与教师dict初值 \n for i in range(totalclsn):\n clsnum = resclstab[i][2]\n # 计算该班的业绩\n if isinstance(resclstab[i][6],float) :\n if resclstab[i][6] > 0:\n fee = resclstab[i][3] * resclstab[i][6] # 学费乘以人数\n else: \n if resclstab[i][5] == \"6人\":\n fee = resclstab[i][3] * 6\n else: \n fee = resclstab[i][3]\n else:\n fee = 0\n tct = resclstab[i][4] # Total Class Times \n # 更新该课程中每位老师的课数、课时贡献百分比 和 对应的完成业绩\n for t in classes[clsnum].keys(): \n if t not in teacherlist.keys():\n teacherlist[t] = 0\n times = classes[clsnum][t]\n p = times / tct\n classes[clsnum][t] = [times,round(p,2),round(p*fee,2)]\n resclstab[i].append(classes[clsnum])\n totaltn = len(teacherlist.keys()) # 参与教师总数\n # 输出文件:“ClassesTable_detail.txt”\n f = open(\"ClassesTable_detail.txt\" , \"w+\")\n for i in range(len(resclstab)):\n f.writelines(str(resclstab[i]) + \"\\n\")\n f.close()\n return classes, totalclsn, totaltn,\n\n \ndef GetGap ( classes, gap_list ):\n \"\"\"\n Input\n classes: {课号:{教师姓名:[完成课时,完成课时比例,完成业绩]}} (dict)\n \n Output\n availableT: available teacher list,在职教师名及其完成业绩(tuple)\n unavailableT: unavailable teacher list,离职 & 管理岗教师名及其完成业绩(tuple)\n gap:由离职 & 管理岗教师产生的业绩缺口(float)\n \"\"\"\n gap = 0\n availableT = []\n unavailableT = []\n teacherlist = {}\n # 计算教师产值列表\n for c in classes.keys():\n for t in classes[c].keys(): \n individ = classes[c][t][2] # 个人业绩\n if t not in teacherlist.keys():\n teacherlist[t] = round(individ,2)\n else:\n teacherlist[t] += round(individ,2)\n # 排序教师产值\n # 对teacherlist(dict)的value排序\n tmp = sorted(teacherlist.items(),key = lambda item:item[1], reverse = True) \n # 从availableT中找出unavailable的教师\n for i in range(len(tmp)):\n if 
tmp[i][0] in gap_list:\n gap += tmp[i][1]\n unavailableT.append(tmp[i])\n else:\n availableT.append(tmp[i])\n return availableT, unavailableT, gap\n\n\ndef NewTeacherDateRange( timerate ):\n \"\"\"\n Function: 通过用户输入,得到预估新教师产能的日期范围\n \n Input\n 默认值为用户在outer loop中输入的日期范围,timerate\n \n Return duration,新教师入职x个月的产能表现。\n \"\"\" \n # 考虑3个月培训期\n timerate += 3 \n prompt = (\"新教师入职x个月的产能表现,请输入x值(Deafault x = \"\n + str(timerate) + \" ):\") \n duration = input(prompt)\n if len(duration) < 1: # Default case\n return timerate\n else:\n return float(duration)\n \n \ndef GetDateRange4NTP ( sche_table, obdate, enddate ):\n \"\"\"\n Get Date Range for New Teachers's Performance\n Input\n sche_table: 配课表table\n startdate - enddate: 表示新教师入职后的测试时间段\n \n Output\n Index:[start(int), end(int), month_rate(float)] (list)\n 即[起始日期的index, 终止日期的index,在此期间该教师工作了多少个月](list)\n \"\"\"\n rows = sche_table.nrows\n # 转换excel日期到datetime格式,并找到sche_table中的最小和最大日期\n # mindate - maxdate: 表示班表中的时间范围\n mindate = xlrd.xldate_as_datetime(sche_table.row_values(1) [3],0) \n maxdate = xlrd.xldate_as_datetime(sche_table.row_values(rows - 1) [3],0) \n # (s,e)[min,max] 或 [min,max](s,e)的情况\n if enddate < mindate or obdate > maxdate:\n return \n # [min (s,e) max]的情况\n elif obdate > mindate and enddate < maxdate:\n for i in range(2,rows - 1): \n curdate = xlrd.xldate_as_datetime(sche_table.row_values(i) [3],0)\n predate = xlrd.xldate_as_datetime(sche_table.row_values(i-1) [3],0)\n if curdate == obdate:\n start = i\n p = curdate\n if predate <= enddate and curdate > enddate:\n end = i-1 \n q = predate\n break\n # (s ==[min, e) max]的情况\n elif obdate <= mindate and enddate < maxdate:\n start = 1\n p = mindate\n for i in range(2,rows - 1): \n curdate = xlrd.xldate_as_datetime(sche_table.row_values(i) [3],0)\n predate = xlrd.xldate_as_datetime(sche_table.row_values(i-1) [3],0)\n if predate <= enddate and curdate > enddate:\n end = i-1\n q = predate\n break\n # [min (s, max] == e)的情况\n elif obdate > mindate and enddate >= maxdate: \n end = rows - 1\n q = maxdate \n for i in range(2,rows - 1): \n curdate = xlrd.xldate_as_datetime(sche_table.row_values(i) [3],0)\n if curdate == obdate:\n start = i\n p = curdate\n break \n # p和q表示最终计算的日期范围\n # 受培训期影响,p通常比obdate晚3个月\n # month_rate为[p,q]期间工作的月份\n day = (q-p).days\n month_rate = day / 30\n return [start, end, month_rate]\n\n\ndef CalNTP( table, name, Dep, cyconsts, Index ): \n \"\"\"\n Function: Calculate a New Teacher's Performance per month\n \n Input\n table: 配课表信息\n name: 教师姓名\n Dep: 部门约束\n cyconsts: 课程约束\n Index:[起始日期的index(int), 终止日期的index(int),在此期间该教师工作了多少个月(float)](list)\n \n Output\n name老师入职Index[0] - Index[1]时间内,完成的平均月业绩 \n \"\"\"\n start = Index[0]\n end = Index[1]\n month_rate = Index[2]\n \n clsdict = {} # 表示老师所授所有课程,及其在其中所贡献业绩的classes dictionary\n # 读入excel \n for i in range(start, end):\n r = table.row_values(i) \n # 教师姓名约束\n if name not in r[5]:\n continue \n # 约束检查\n flag = CheckConstraint(r[0], r[2], r[7], r[5], Dep, cyconsts)\n if not flag:\n continue \n # 计算该班的业绩\n if isinstance(r[11],float) :\n if r[11] > 0:\n fee = r[4] * r[11] # 学费乘以人数\n else: \n if r[7] == \"6人\":\n fee = r[4] * 6\n else: \n fee = r[4]\n else:\n fee = 0\n # 计算classes dictionary \n if r[1] not in clsdict.keys(): # 首次记录该课号\n clsdict[r[1]] = (1/r[10]) * fee\n else:\n clsdict[r[1]] += (1/r[10]) * fee\n sum = 0\n for item in clsdict.keys():\n sum += clsdict[item]\n # 计算月平均业绩\n # sum为0,或month_rate为负表示仍在培训期内,返回0\n return round(max(0,sum/month_rate),2)\n \n\ndef NewTeacherPerform( sche_table, entry_table, duration, 
Dep, cyconsts ):\n \"\"\"\n Function: 计算符合约束条件的所有教师们,在入职一段时间后的月平均完成业绩表现\n \n Input\n sche_table: 配课表信息\n entry_table:教师入职时间表信息\n Dep: 部门约束\n cyconsts: 课程项目约束\n \n Output\n NTPerform(list): 符合条件的老师的月平均完成业绩表现\n performMean(float): “新”教师月平均表现的平均值\n \"\"\"\n perform = {}\n m = entry_table.nrows \n for i in range(1, m-1):\n r = entry_table.row_values(i) \n name = r[2] # teacher's name \n obdate = xlrd.xldate_as_datetime(r[3],0) # Onboarding date\n enddate = obdate + datetime.timedelta(days = (int(duration) * 30))\n Index = GetDateRange4NTP(sche_table, obdate, enddate)\n if Index is None:\n continue\n score = CalNTP(sche_table, name, Dep, cyconsts, Index)\n perform[name] = score \n # 对perform(dict)的value排序 \n tmp = sorted(perform.items(),key = lambda item:item[1], reverse = True) \n # 删除还在培训期的老师\n# tmp = [NTPerform[i][1] for i in range(len(NTPerform)) if NTPerform[i][1] != 0]\n# tmp = np.array(tmp) \n NTPerform = [tmp[i][:] for i in range(len(tmp)) if tmp[i][1] != 0]\n tmp = np.array([NTPerform[i][1] for i in range(len(NTPerform))])\n return NTPerform, round(np.mean(tmp),2) \n\n\ndef InputGoalProfit():\n \"\"\"\n \"\"\"\n # 输入目标业绩\n while True:\n profit = input(\"请输入(真实)业绩目标(万元): \") \n try:\n profit = float(profit) * 10000\n break\n except:\n print(\"请重新输入数字\")\n return profit\n\n\ndef NumofTneeded( availableT, gap, estimean, goal ):\n \"\"\"\n Function: Calculate Number of Teachers needed\n \n Input\n availableT(list): 可用教师的产能列表\n gap(float): 由不可用教师产生的产能缺口\n estimean(float): 新教师的预估产能\n goal(float): 目标面上业绩\n \n Output\n totalfee(float): 当前能够承担的总产能\n totalgap(float): 面上产能总缺口\n NTnum(int): 需要招聘教师数\n \"\"\"\n\n # 现有可用教师能够承担的总业绩\n totalfee = 0\n for i in range(len(availableT)):\n totalfee += availableT[i][1] \n # 总的产能缺口\n totalgap = goal - totalfee\n # 需要的新教师数\n NTnum = math.ceil(totalgap / estimean)\n return totalfee, totalgap, NTnum\n\n\ndef FindZombie( availableT, estimean ):\n \"\"\"\n Function: 找到可用教师产能列表中,产能小于等于新教师平均产能的行[教师,产能]\n \n Input\n availableT: 可用教师产能列表 \n estimean: 新教师平均产能\n \n Return\n 僵尸教师的index起始值与终止值 \n \"\"\"\n n = len(availableT)\n for i in range(n):\n if availableT[i][1] <= estimean:\n return i, n\n \n \ndef Output( startdate, enddate, timerate, Dep, cyconsts, \n totalclsn, totaltn, totalfee, goal, \n availableT, unavailableT, gap, totalgap,\n NTPerform, performMean, duration, NTnum ):\n \"\"\"\n Function: 将4部分信息,输出到一个excel文件中\n startdate, enddate: 分析数据的起始 & 终止日期\n \n Output\n Part1: 可用教师完成业绩情况\n Part2: 不可用教师完成业绩情况\n Part3: 新教师表现\n Part4: 总结,需要招聘人数\n \"\"\"\n estimean = timerate * performMean\n daterange = startdate.strftime(\"%Y-%m-%d\") + \"至\" + enddate.strftime(\"%Y-%m-%d\")\n filename = daterange + \"招聘人数说明.xlsx\"\n \n try:\n # 文件已存在的话,打开文件\n wb = load_workbook( filename ) \n # 新建一个sheet\n ws = wb.create_sheet() \n except:\n # 文件不存在,首次写入文件\n # 在内存中创建一个workbook对象,而自动创建一个worksheet \n wb = Workbook() \n # 获取当前活跃的worksheet,默认就是第一个worksheet\n ws = wb.active \n finally: \n # Part1部分 \n if len(Dep) < 1:\n department = \"全部部门\"\n if len(cyconsts) < 1:\n program = \"全部课程项目\"\n else:\n program = ','.join(cyconsts) + \"课程项目\" \n part1strA4 = \"可用教师完成业绩:\" + str(round(totalfee,2)) + \"元\"\n part1strD2 = \"共开班: \" + str(totalclsn) + \"节\"\n part1strD3 = \"共\" + str(totaltn) + \"位教师参与教学\"\n # 部分字体格式 \n font1 = Font( name = 'Calibri', size = 11, bold = True, italic = False, vertAlign = None,\n underline = 'none', strike = False, color = RED ) \n # Part1内容\n ws[\"A1\"].font = ws[\"A2\"].font = ws[\"A3\"].font = ws[\"A4\"].font = ws[\"D2\"].font = ws[\"D3\"].font = font1 \n ws[\"A1\"] = 
daterange; ws[\"A2\"] = department; ws[\"A3\"] = program; ws[\"A4\"] = part1strA4 \n for i in range(len(availableT)):\n ws.append(availableT[i])\n ws[\"D2\"] = part1strD2; ws[\"D3\"] = part1strD3\n # 将僵尸教师字体改为蓝色\n [p, r] = FindZombie( availableT, estimean )\n p += 5; r += 5\n for i in range(p, r):\n ws.cell(row = i, column = 1).font = Font( color = BLUE ) \n ws.cell(row = i, column = 2).font = Font( color = BLUE ) \n ws.cell(row = p, column = 3, value = \"← 小于等于新教师平均表现\").font = Font( bold = True, color = RED ) \n # Part2内容\n ws[\"F1\"].font = ws[\"F2\"].font = ws[\"F3\"].font = ws[\"F4\"].font = font1\n part2F4 = \"离职 & 管理层老师形成的业绩缺口:\" + str(round(gap,2)) + \"元\"\n ws[\"F1\"] = daterange; ws[\"F2\"] = department; ws[\"F3\"] = program; ws[\"F4\"] = part2F4\n for i in range(len(unavailableT)):\n ws.cell(row = 5 + i, column = 6 , value = unavailableT[i][0])\n ws.cell(row = 5 + i, column = 7 , value = unavailableT[i][1])\n # Part3内容 \n part3K2 = \"教师入职满\" + str(duration) + \"个月时的产能月平均表现:\"\n part3K3 = \"新教师产能月平均表现的均值为:\" + str(round(performMean,2)) + \"元\"\n part3K4 = \"对应\" + daterange + \", \" + str(round(timerate,2)) + \"个月的产能:\" + str(round(estimean,2)) + \"元\" \n ws[\"K1\"].font = ws[\"K2\"].font = ws[\"K3\"].font = ws[\"K4\"].font = ws[\"L1\"].font = font1\n ws[\"K1\"] = department; ws[\"L1\"] = program; ws[\"K2\"] = part3K2; ws[\"K3\"] = part3K3\n ws[\"K4\"] = part3K4\n for i in range(len(NTPerform)):\n ws.cell(row = 5 + i, column = 11 , value = NTPerform[i][0])\n ws.cell(row = 5 + i, column = 12 , value = NTPerform[i][1])\n # Part4内容\n part4N8 = (\"目标产能缺口为:\" + str(round(goal - totalfee - gap , 2)) + \n \"元,不可用教师缺口为:\" + str(round(gap,2)) + \"元\" +\n \"面上产能总缺口为:\" + str(round(totalgap, 2)) + \"元\")\n part4N9 = '''如果维持\"不活跃\"教师现状不变,则此段时间所需的新教师数:'''\n part4N10 = \"等于 面上产能总缺口 / 新教师平均产能表现 \"\n part4N11 = \"等于:\" + str(NTnum) + \"人\"\n ws[\"N7\"].font = ws[\"N8\"].font = ws[\"N9\"].font = ws[\"N10\"].font = ws[\"N11\"].font = font1\n ws[\"N14\"].font = Font( size = 10, color = BLUE )\n ws[\"N7\"] = \"Summary: \"; ws[\"N8\"] = part4N8; ws[\"N9\"] = part4N9; ws[\"N10\"] = part4N10\n ws[\"N11\"] = part4N11 \n # 保存文件\n wb.save( filename )\n\n\ndef DoesChangeDep():\n \"\"\"\n Function: 是否重新选择部门与项目进行下一次分析\n Return True如果需要重新分析,False otherwise\n \"\"\"\n while True: \n ans = input(\"Choose another department/program to analysis again ? [y / n]: \")\n if ans != \"n\" and ans != \"y\":\n print(\"Please input the legal parameter [y / n]\")\n continue\n if ans == \"y\":\n return True\n elif ans == \"n\":\n return False\n \n\ndef DoesChangeDate(): \n \"\"\"\n Function: 是否重新选择时间范围进行下一次分析\n Return True如果需要重新分析,False otherwise\n \"\"\"\n while True: \n ans = input(\"Choose another date range to analysis again ? [y / n]: \")\n if ans != \"n\" and ans != \"y\":\n print(\"Please input the legal parameter [y / n]\")\n continue\n if ans == \"y\":\n return True\n elif ans == \"n\":\n return False\n\n \ndef CalcuNum( sche_table, entry_table, gap_list ):\n while True: \n # 用户输入 & 转换日期范围\n [stmp, etmp] = InputDateRange()\n [startdate, enddate, start, end, timerate] = GetDateRange( stmp, etmp, sche_table ) \n while True:\n [Dep, cyconsts] = GetConstraint() # 部门约束 & 课程约束 \n # Step1.计算约束条件下的教师产能列表,缺口等统计数据\n # 梳理 & 统计 每个班对应的各教师贡献业绩,总开课数,总教师数\n [classes, totalclsn, totaltn] = TableFilter( sche_table, [start, end, Dep, cyconsts] ) \n # 计算可用 & 非可用教师名单,以及由此产生的业绩缺口 \n [availableT, unavailableT, gap] = GetGap( classes, gap_list ) \n # Step2. 
计算老师入职后一段时间的平均表现\n duration = NewTeacherDateRange( timerate ) \n [NTPerform, performMean] = NewTeacherPerform( sche_table, entry_table, duration, Dep, cyconsts )\n # Step3. 计算需要招聘人数\n estimean = timerate * performMean # 新教师在所选日期范围内的平均业绩表现\n goal = InputGoalProfit() # 目标业绩\n [totalfee, totalgap, NTnum] = NumofTneeded( availableT, gap, estimean, goal )\n # Step4. 输出计算结果\n Output( startdate, enddate, timerate, Dep, cyconsts, \n totalclsn, totaltn, totalfee, goal, \n availableT, unavailableT, gap, totalgap,\n NTPerform, performMean, duration, NTnum )\n # 是否重新选择部门 / 项目分析\n if not DoesChangeDep():\n break\n # 是否重新选择时间分析\n if not DoesChangeDate():\n break\n\n \n# Main program\nif __name__ == '__main__':\n # 载入配课表, 教师入职时间表, 离职&管理层老师名单\n sche_table = LoadExcelTable( SCHEDULE_FILENMAE ) \n entry_table = LoadExcelTable( ENTRYDATE_FILENAME )\n gap_list = LoadTeachersGap( GAP_FILENAME ) \n # 计算缺口 & 招聘人数 \n CalcuNum( sche_table, entry_table, gap_list )", "sub_path": "201805_Programs/XDF_RecruitsNum_v2.py", "file_name": "XDF_RecruitsNum_v2.py", "file_ext": "py", "file_size_in_byte": 28103, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "xlrd.open_workbook", "line_number": 41, "usage_type": "call"}, {"api_name": "xlrd.open_workbook", "line_number": 53, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 73, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 73, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 105, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 106, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 113, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 124, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 125, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 381, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 382, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 389, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 390, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 403, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 404, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 414, "usage_type": "call"}, {"api_name": "xlrd.xldate_as_datetime", "line_number": 499, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 500, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 512, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 513, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 553, "usage_type": "call"}, {"api_name": "openpyxl.load_workbook", "line_number": 594, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 600, "usage_type": "call"}, {"api_name": "openpyxl.styles.Font", "line_number": 615, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.RED", "line_number": 616, "usage_type": "name"}, {"api_name": "openpyxl.styles.Font", "line_number": 627, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.BLUE", "line_number": 627, "usage_type": "name"}, 
{"api_name": "openpyxl.styles.Font", "line_number": 628, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.BLUE", "line_number": 628, "usage_type": "name"}, {"api_name": "openpyxl.styles.Font", "line_number": 629, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.RED", "line_number": 629, "usage_type": "name"}, {"api_name": "openpyxl.styles.Font", "line_number": 655, "usage_type": "call"}, {"api_name": "openpyxl.styles.colors.BLUE", "line_number": 655, "usage_type": "name"}]} +{"seq_id": "84956724", "text": "from DataSet import ReconstructDataSet, RT_ReconstructDataSet, ValidationTransferDataSet\nfrom torch.utils.data.dataloader import DataLoader\nfrom tqdm import trange\nfrom models.model import Model\nimport argparse\nfrom torch.nn.parallel import DataParallel\nfrom tqdm import tqdm\nfrom torch.utils.tensorboard import SummaryWriter\nimport torch\nimport os\nimport utils\nimport yaml\nimport os, cv2, traceback, shutil\nimport numpy as np\nimport random\n\ntorch.manual_seed(1)\nrandom.seed(2)\nnp.random.seed(3)\n\nimport wandb\nwandb.init(sync_tensorboard=True)\n\ndef validation(model, validation_loader, device, epoch, subject_name, image_size, writer):\n\n\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n folder = os.path.join('validation/')\n if not os.path.exists(folder):\n os.system(\"mkdir -p \"+folder)\n\n subject_folder_name = os.path.join(folder, '/'.join(subject_name.split('/')[-3:]) )\n if not os.path.exists(subject_folder_name):\n\n os.makedirs(subject_folder_name)\n print (\"Writing to folder: {}\".format(subject_folder_name))\n out_fname = os.path.join(subject_folder_name, \"{}_vid.mp4\".format(epoch))\n cv2_writer = cv2.VideoWriter(out_fname, fourcc, 24, (image_size*3, image_size))\n print (out_fname)\n\n background = torch.ones((image_size, image_size))\n model = Model(config, \"finetune\")\n iter_loader = iter(validation_loader)\n model.prepare_for_finetune(next(iter_loader), background)\n model = model.to(device)\n model.background_start = model.background_start.to(device)\n\n\n vid_to_tensor = []\n\n with torch.no_grad():\n try:\n iterator = tqdm(enumerate(validation_loader), total=len(validation_loader))\n for i, data in iterator:\n data_gpu = {key: item.to(device) for key, item in data.items()}\n\n mask, fake_image, real_image, body, coordinate, texture = model(data_gpu, \"inference\")\n\n label = utils.d_colorize(data_gpu[\"body\"]).cpu().numpy()\n B, _, H, W = coordinate.size()\n\n real_image = data['image'].cpu().numpy()\n fake_image = np.clip(fake_image.cpu().numpy(), 0, 1)\n\n outputs = np.concatenate((real_image, label, fake_image), axis=3)\n for output in outputs:\n write_image = (output[::-1].transpose((1, 2, 0)) * 255).astype(np.uint8)\n\n vid_to_tensor.append(torch.tensor(write_image))\n\n cv2_writer.write(write_image)\n\n except Exception as e:\n print(traceback.format_exc())\n cv2_writer.release()\n\n cv2_writer.release()\n\n vid_to_tensor = torch.stack(vid_to_tensor, dim =0).unsqueeze(0)\n vid_to_tensor = vid_to_tensor.permute(0,1,4,2,3)\n\n writer.add_video(tag=\"Validation/Video\", vid_tensor = vid_to_tensor, fps=60)\n\n\ndef pretrain(config, writer, device_idxs=[0]):\n\n print (config)\n device = torch.device(\"cuda:\" + str(device_idxs[0]))\n\n dataset = ReconstructDataSet(config['dataroot'], config)\n dataset_RT = RT_ReconstructDataSet('/vid_data/FSMR_data/rebecca_taylor_top_v2/train', config, min_sequence_len=5)\n\n joint_dataset = torch.utils.data.ConcatDataset([dataset, dataset_RT])\n print (joint_dataset)\n print 
(len(joint_dataset), len(dataset), len(dataset_RT))\n\n    joined_filelist = dataset.filelists + dataset_RT.filelists\n\n    sampler = utils.TrainSampler(config['batchsize'], dataset.filelists)\n    sampler_RT = utils.TrainSampler(config['batchsize'], dataset_RT.filelists)\n    joint_sampler = utils.TrainSampler(config['batchsize'], joined_filelist)\n\n    data_loader = DataLoader(dataset, batch_sampler=sampler, num_workers=16, pin_memory=True)\n    data_loader_RT = DataLoader(dataset_RT, batch_sampler=sampler_RT, num_workers=0, pin_memory=True)\n    joint_dataloader = DataLoader(joint_dataset, batch_sampler=joint_sampler, num_workers=8, pin_memory=True)\n\n    #/data/FSMR_data/rebecca_taylor_top/test/000019B126/subject_1/'\n    # validation_dataset = ValidationTransferDataSet(root='/vid_data/FSMR_data/top_data/train/91-2Jb8DkfS/',\n    #                                                src_root='/vid_data/FSMR_data/rebecca_taylor_top/test/000019B126/subject_1/',\n    #                                                config=config)\n    #\n    # validation_loader = DataLoader(validation_dataset,\n    #                                1, num_workers=4,\n    #                                pin_memory=True,\n    #                                shuffle=False)\n    totol_step = 0\n\n    model = Model(config, \"train\")\n    model.prepare_for_train_RT(n_class=len(dataset_RT.filelists))\n    model = model.to(device)\n    model = DataParallel(model, device_idxs)\n    model.train()\n\n    print (model)\n\n    for epoch in trange(config['epochs']):\n\n        for i, data in tqdm(enumerate(joint_dataloader)):\n\n            data_gpu = {key: item.to(device) for key, item in data.items()}\n            # Single forward pass per iteration; the mode string only selects which branch is trained\n            if i % 200 <= 100:\n                mask, fake_image, textures, body, cordinate, losses = model(data_gpu, \"train_UV_RT\")\n            else:\n                mask, fake_image, textures, body, cordinate, losses = model(data_gpu, \"train_texture_RT\")\n\n            for key, item in losses.items():\n                losses[key] = item.mean()\n                writer.add_scalar(\"Loss/\"+key, losses[key], totol_step)\n\n            if i % 200 <= 100:\n                model.module.optimizer_G.zero_grad()\n                # model.module.optimizer_texture_stack.zero_grad()\n            else:\n                model.module.optimizer_T.zero_grad()\n\n            loss_G = losses.get(\"loss_G_L1\", 0) + losses.get(\"loss_G_GAN\", 0) + losses.get(\"loss_G_GAN_Feat\", 0) + losses.get(\"loss_G_mask\", 0) \\\n                     + losses.get(\"loss_texture\", 0) * config['l_texture'] + losses.get(\"loss_coordinate\", 0) * config['l_coordinate'] \\\n                     + losses.get(\"loss_mask\", 0) * config['l_mask']\n\n            loss_G.backward()\n\n            if i % 200 <= 100:\n                model.module.optimizer_G.step()\n                # model.module.optimizer_texture_stack.step()\n            else:\n                model.module.optimizer_T.step()\n\n            writer.add_scalar(\"Loss/G\", loss_G, totol_step)\n\n            if totol_step % config['display_freq'] == 0:\n                body_sum = body.sum(dim=1, keepdim=True)\n                B, _, H, W = cordinate.size()\n                cordinate_zero = torch.zeros((B, 1, H, W), dtype=torch.float32, device=cordinate.device)\n                mask_label = torch.argmax(mask, dim=1, keepdim=True)\n\n                cordinate_u = torch.gather(dim=1, index=mask_label, input=torch.cat((torch.zeros_like(cordinate_zero), cordinate[:, :24]), dim=1))\n                cordinate_v = torch.gather(dim=1, index=mask_label, input=torch.cat((torch.zeros_like(cordinate_zero), cordinate[:, 24:]), dim=1))\n                writer.add_images(\"Cordinate/U\", utils.colorize(cordinate_u)*data_gpu[\"foreground\"].expand_as(data[\"image\"]).to(torch.float32), totol_step, dataformats=\"NCHW\")\n                writer.add_images(\"Cordinate/V\", utils.colorize(cordinate_v)*data_gpu[\"foreground\"].expand_as(data[\"image\"]).to(torch.float32), totol_step, dataformats=\"NCHW\")\n                b, _, h, w = textures.size()\n
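                # Hedged editor's note: textures appears to pack 24 DensePose-style body-part\n                # textures along the channel axis (24 parts x 3 RGB channels), so this view\n                # unstacks them into a 24-image batch for TensorBoard; clamp keeps values in [0, 1].\n                writer.add_images(\"Texture\", torch.clamp(textures[0].view(24, 3, h, 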
w), 0, 1), totol_step, dataformats=\"NCHW\")\n b, c, h, w = data_gpu[\"texture\"][0].size()\n writer.add_images(\"Texture_Input\", data_gpu[\"texture\"][0].view(b, 24, 3, h, w).view(b * 24, 3, h, w), totol_step, dataformats=\"NCHW\")\n writer.add_images(\"Mask/Generate\", (1 - mask[:,0]).unsqueeze(1), totol_step, dataformats='NCHW')\n writer.add_images(\"Mask/Individual\", utils.d_colorize(mask_label), totol_step, dataformats=\"NCHW\")\n writer.add_images(\"Mask/Target\", data[\"foreground\"], totol_step, dataformats=\"NCHW\")\n writer.add_images(\"Image/Fake\", torch.clamp(fake_image, 0, 1), totol_step, dataformats=\"NCHW\")\n writer.add_images(\"Image/True\", data[\"image\"] * data[\"foreground\"].expand_as(data[\"image\"]).to(torch.float32), totol_step, dataformats=\"NCHW\")\n writer.add_images(\"Input/body\", body_sum, totol_step, dataformats=\"NCHW\")\n\n totol_step+=1\n\n\n #validate\n\n\n\n model.module.save('latest_train_finetune_rebecca_taylor')\n model.module.save(str(epoch+1)+\"_train_finetune_rebecca_taylor\")\n\n model.module.scheduler_G.step()\n print (\"Validation\")\n # validation(model, validation_loader, device,epoch, subject_name = validation_loader.dataset.src_root, image_size=config['resize'], writer=writer)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config\")\n parser.add_argument(\"--device\", type=int, nargs='+')\n args = parser.parse_args()\n with open(args.config) as f:\n config = yaml.load(f, Loader=yaml.FullLoader)\n writer = SummaryWriter(log_dir=os.path.join(config['checkpoint_path'], config[\"name\"], \"train\"), comment=config['name'])\n pretrain(config, writer, args.device)\n", "sub_path": "finetune_RT.py", "file_name": "finetune_RT.py", "file_ext": "py", "file_size_in_byte": 9170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.manual_seed", "line_number": 17, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random.seed", "line_number": 19, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 19, "usage_type": "attribute"}, {"api_name": "wandb.init", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "attribute"}, {"api_name": "os.system", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 32, "usage_type": "call"}, {"api_name": "os.path", "line_number": 32, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 33, "usage_type": "call"}, {"api_name": "os.path", "line_number": 33, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.VideoWriter", "line_number": 38, "usage_type": "call"}, {"api_name": "torch.ones", "line_number": 41, "usage_type": "call"}, {"api_name": "models.model.Model", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 51, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 53, "usage_type": 
"call"}, {"api_name": "utils.d_colorize", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 63, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 67, "usage_type": "attribute"}, {"api_name": "torch.tensor", "line_number": 69, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 74, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 79, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 88, "usage_type": "call"}, {"api_name": "DataSet.ReconstructDataSet", "line_number": 90, "usage_type": "call"}, {"api_name": "DataSet.RT_ReconstructDataSet", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.utils.data.ConcatDataset", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 93, "usage_type": "attribute"}, {"api_name": "utils.TrainSampler", "line_number": 100, "usage_type": "call"}, {"api_name": "utils.TrainSampler", "line_number": 101, "usage_type": "call"}, {"api_name": "utils.TrainSampler", "line_number": 102, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 105, "usage_type": "call"}, {"api_name": "torch.utils.data.dataloader.DataLoader", "line_number": 106, "usage_type": "call"}, {"api_name": "models.model.Model", "line_number": 120, "usage_type": "call"}, {"api_name": "torch.nn.parallel.DataParallel", "line_number": 123, "usage_type": "call"}, {"api_name": "tqdm.trange", "line_number": 128, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 130, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 170, "usage_type": "attribute"}, {"api_name": "torch.argmax", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 173, "usage_type": "call"}, {"api_name": "torch.gather", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.zeros_like", "line_number": 174, "usage_type": "call"}, {"api_name": "utils.colorize", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 175, "usage_type": "attribute"}, {"api_name": "utils.colorize", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 176, "usage_type": "attribute"}, {"api_name": "torch.clamp", "line_number": 178, "usage_type": "call"}, {"api_name": "utils.d_colorize", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.clamp", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 185, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 204, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 209, "usage_type": "call"}, {"api_name": "yaml.FullLoader", "line_number": 209, "usage_type": "attribute"}, {"api_name": "torch.utils.tensorboard.SummaryWriter", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 210, "usage_type": "call"}, {"api_name": "os.path", "line_number": 210, "usage_type": "attribute"}]} +{"seq_id": "565816519", "text": "from operator import 
itemgetter\n\nfrom app.util.helpers import compute_salary_score, compute_qualification_score\nfrom app.model.application import Application\nfrom app.model.offer import Offer\nfrom app.model.requirement import Requirement\nfrom app.model.skill import Skill\nfrom flask import jsonify\nfrom flask_jwt import current_identity\nfrom sqlalchemy import func, or_, and_\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom werkzeug.datastructures import MultiDict\nfrom werkzeug.exceptions import NotFound, Unauthorized, BadRequest\n\nfrom app.db.database import db_session\nfrom app.model.user import User\n\n\nclass OfferRepository:\n\n    @staticmethod\n    def offer_view(offer_id: int, user: User):\n        try:\n            offer = db_session.query(Offer).get(offer_id)\n            if offer is None:  # Query.get returns None rather than raising NoResultFound\n                raise NotFound()\n            offer_dict = offer.to_dict()\n            applications = db_session \\\n                .query(Application.id) \\\n                .filter(and_(\n                    Application.offer_id == offer_id,\n                    Application.status == 'SUBMITTED')) \\\n                .count()\n            # .count() already returns an int, so it is stored directly; calling len() on it\n            # would raise a TypeError. Owners and other users see the same submitted count.\n            offer_dict['applications'] = applications\n            return offer_dict\n        except NoResultFound:\n            raise NotFound()\n\n    @staticmethod\n    def offer_all():\n        offers = db_session.query(Offer).all()\n        items = {'count': len(offers), 'items': [offer.to_dict() for offer in offers]}\n        return items\n\n    @staticmethod\n    def matching_offers(user: User, request_args: MultiDict):\n        min_salary = request_args.get('salary', 0, int)\n        sort_by = request_args.get('sort_by', None, str)\n        ascending = request_args.get('order', 'asc', str)\n        if ascending not in ['asc', 'desc']:\n            raise BadRequest()\n        ascending = ascending == 'asc'\n        user_id = user.id\n        offers = db_session.query(Offer). \\\n            join(Requirement, Offer.id == Requirement.offer_id). \\\n            join(Skill, func.lower(Skill.name) == func.lower(Requirement.name)).filter(\n            and_(\n                Skill.user_id == user_id,\n                or_(\n                    Skill.skill_level >= Requirement.skill_level,\n                    Skill.experience >= Requirement.experience,\n                )\n            )).all()\n        offers_dict = []\n        for offer in offers:\n
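            # Hedged editor's note (compute_qualification_score is defined in\n            # app.util.helpers and its body is not shown here): each offer's requirements\n            # are outer-joined to this user's skills, so a requirement without a matching\n            # skill contributes NULL skill columns; the per-requirement scores are then\n            # averaged into a single qualification_score below.\n            result = db_session.query(\n                Requirement.experience,\n                Requirement.skill_level,\n                Skill.experience,\n                Skill.skill_level). \\\n                filter(Requirement.offer_id == offer.id). \\\n                outerjoin(Skill, func.lower(Skill.name) == func.lower(Requirement.name)). \\\n                filter(or_(Skill.user_id == user_id, Skill.user_id.is_(None))). 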
\\\n all()\n offer = offer.to_dict()\n scores = [compute_qualification_score(*x) for x in result]\n score = sum(scores) / len(scores)\n offer['qualification_score'] = score\n offer['salary_score'] = compute_salary_score(offer['minimum_salary'], offer['maximum_salary'], min_salary)\n offers_dict.append(offer)\n if sort_by is not None and len(offers_dict) > 0 and sort_by in offers_dict[0].keys():\n offers_dict = sorted(offers_dict, key=itemgetter(sort_by), reverse=(ascending is False))\n items = {\n 'items': offers_dict,\n 'count': len(offers)\n }\n return items\n\n @staticmethod\n def my_offers(user: User):\n offers = db_session.query(Offer).filter(Offer.user_id == user.id).all()\n items = {\n 'items': [x.to_dict() for x in offers],\n 'count': len(offers)\n }\n return items\n\n @staticmethod\n def add_offer(user: User, offer_json: dict):\n min_salary = offer_json['minimum_salary']\n max_salary = offer_json['maximum_salary']\n offer = Offer(user, offer_json['position_name'], offer_json['description'], min_salary, max_salary)\n db_session.add(offer)\n db_session.commit()\n return offer.to_dict()\n\n @staticmethod\n def offer_edit(offer_id: int, user: User, offer_json: dict):\n offer = db_session.query(Offer).get(offer_id)\n if offer.user_id != user.id:\n raise Unauthorized()\n try:\n offer.position_name = offer_json['position_name']\n offer.description = offer_json['description']\n offer.minimum_salary = offer_json['minimum_salary']\n offer.maximum_salary = offer_json['maximum_salary']\n db_session.commit()\n return offer.to_dict()\n except NoResultFound:\n raise NotFound()\n\n @staticmethod\n def offer_delete(offer_id: int, user: User):\n try:\n offer = db_session.query(Offer).get(offer_id)\n if offer.user_id != user.id:\n raise Unauthorized()\n db_session.delete(offer)\n db_session.commit()\n return offer.to_dict()\n except NoResultFound:\n raise NotFound()\n", "sub_path": "backend/app/repository/offer_repository.py", "file_name": "offer_repository.py", "file_ext": "py", "file_size_in_byte": 5131, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "app.model.user.User", "line_number": 22, "usage_type": "name"}, {"api_name": "app.db.database.db_session.query", "line_number": 24, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 24, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 24, "usage_type": "name"}, {"api_name": "app.db.database.db_session.query", "line_number": 26, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 26, "usage_type": "name"}, {"api_name": "app.model.application.Application.id", "line_number": 27, "usage_type": "attribute"}, {"api_name": "app.model.application.Application", "line_number": 27, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 28, "usage_type": "call"}, {"api_name": "app.model.application.Application.offer_id", "line_number": 29, "usage_type": "attribute"}, {"api_name": "app.model.application.Application", "line_number": 29, "usage_type": "name"}, {"api_name": "app.model.application.Application.status", "line_number": 30, "usage_type": "attribute"}, {"api_name": "app.model.application.Application", "line_number": 30, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 37, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 38, "usage_type": "call"}, {"api_name": "app.db.database.db_session.query", 
"line_number": 42, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 42, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 42, "usage_type": "name"}, {"api_name": "app.model.user.User", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.datastructures.MultiDict", "line_number": 47, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.BadRequest", "line_number": 52, "usage_type": "call"}, {"api_name": "app.model.skill.Skill", "line_number": 57, "usage_type": "argument"}, {"api_name": "app.model.requirement.Requirement", "line_number": 56, "usage_type": "argument"}, {"api_name": "app.db.database.db_session.query", "line_number": 55, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 55, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 55, "usage_type": "name"}, {"api_name": "app.model.offer.Offer.id", "line_number": 56, "usage_type": "attribute"}, {"api_name": "app.model.offer.Offer", "line_number": 56, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.offer_id", "line_number": 56, "usage_type": "attribute"}, {"api_name": "sqlalchemy.func.lower", "line_number": 57, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 57, "usage_type": "name"}, {"api_name": "app.model.skill.Skill.name", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement.name", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 57, "usage_type": "name"}, {"api_name": "sqlalchemy.and_", "line_number": 58, "usage_type": "call"}, {"api_name": "app.model.skill.Skill.user_id", "line_number": 59, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 59, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 60, "usage_type": "call"}, {"api_name": "app.model.skill.Skill.skill_level", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 61, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.skill_level", "line_number": 61, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 61, "usage_type": "name"}, {"api_name": "app.model.skill.Skill.experience", "line_number": 62, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 62, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.experience", "line_number": 62, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 62, "usage_type": "name"}, {"api_name": "app.model.skill.Skill", "line_number": 73, "usage_type": "argument"}, {"api_name": "app.db.database.db_session.query", "line_number": 67, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 67, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.experience", "line_number": 68, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 68, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.skill_level", "line_number": 69, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 69, "usage_type": "name"}, {"api_name": "app.model.skill.Skill.experience", "line_number": 70, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 70, "usage_type": "name"}, {"api_name": 
"app.model.skill.Skill.skill_level", "line_number": 71, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 71, "usage_type": "name"}, {"api_name": "app.model.requirement.Requirement.offer_id", "line_number": 72, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 72, "usage_type": "name"}, {"api_name": "sqlalchemy.func.lower", "line_number": 73, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 73, "usage_type": "name"}, {"api_name": "app.model.skill.Skill.name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement.name", "line_number": 73, "usage_type": "attribute"}, {"api_name": "app.model.requirement.Requirement", "line_number": 73, "usage_type": "name"}, {"api_name": "sqlalchemy.or_", "line_number": 74, "usage_type": "call"}, {"api_name": "app.model.skill.Skill.user_id", "line_number": 74, "usage_type": "attribute"}, {"api_name": "app.model.skill.Skill", "line_number": 74, "usage_type": "name"}, {"api_name": "app.model.skill.Skill.user_id.is_", "line_number": 74, "usage_type": "call"}, {"api_name": "app.util.helpers.compute_qualification_score", "line_number": 77, "usage_type": "call"}, {"api_name": "app.util.helpers.compute_salary_score", "line_number": 80, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 83, "usage_type": "call"}, {"api_name": "app.model.user.User", "line_number": 91, "usage_type": "name"}, {"api_name": "app.db.database.db_session.query", "line_number": 92, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 92, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 92, "usage_type": "name"}, {"api_name": "app.model.offer.Offer.user_id", "line_number": 92, "usage_type": "attribute"}, {"api_name": "app.model.user.User", "line_number": 100, "usage_type": "name"}, {"api_name": "app.model.offer.Offer", "line_number": 103, "usage_type": "call"}, {"api_name": "app.db.database.db_session.add", "line_number": 104, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 104, "usage_type": "name"}, {"api_name": "app.db.database.db_session.commit", "line_number": 105, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 105, "usage_type": "name"}, {"api_name": "app.model.user.User", "line_number": 109, "usage_type": "name"}, {"api_name": "app.db.database.db_session.query", "line_number": 110, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 110, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 110, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.Unauthorized", "line_number": 112, "usage_type": "call"}, {"api_name": "app.db.database.db_session.commit", "line_number": 118, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 118, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 120, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 121, "usage_type": "call"}, {"api_name": "app.model.user.User", "line_number": 124, "usage_type": "name"}, {"api_name": "app.db.database.db_session.query", "line_number": 126, "usage_type": "call"}, {"api_name": "app.model.offer.Offer", "line_number": 126, "usage_type": "argument"}, {"api_name": "app.db.database.db_session", "line_number": 126, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.Unauthorized", 
"line_number": 128, "usage_type": "call"}, {"api_name": "app.db.database.db_session.delete", "line_number": 129, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 129, "usage_type": "name"}, {"api_name": "app.db.database.db_session.commit", "line_number": 130, "usage_type": "call"}, {"api_name": "app.db.database.db_session", "line_number": 130, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.exc.NoResultFound", "line_number": 132, "usage_type": "name"}, {"api_name": "werkzeug.exceptions.NotFound", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "13226828", "text": "\n# import torch\nfrom sklearn.metrics import f1_score, accuracy_score, classification_report\nfrom dataset.utils import get_label2idx\nfrom tqdm import tqdm\n\n\ndef evaluate(model, loader, device):\n\n true_label = []\n pred_label = []\n\n ground_truth = []\n predicted_class = []\n \n label2idx = get_label2idx()\n idx2label = dict(zip(label2idx.values(), label2idx.keys()))\n \n\n\n model.eval()\n model = model.to(device)\n\n for batch in tqdm(loader):\n\n outputs = model(input_ids=batch['input_ids'].to(device), attention_mask=batch['attention_mask'].to(device), _len=batch['_len']).argmax(dim=-1)\n\n pred_label += outputs.cpu().detach().tolist()\n true_label += batch['target'].cpu().tolist()\n \n ground_truth += batch['label']\n predicted_class += [idx2label[each] for each in outputs.cpu().detach().tolist()]\n\n # limit to evaluate only on one batch comment it before final run\n # break\n\n f1 = f1_score(y_true=true_label, y_pred=pred_label, average='macro')\n accuracy = accuracy_score(y_true=true_label, y_pred=pred_label)\n cr = classification_report(y_true=ground_truth, y_pred=predicted_class, digits=4)\n \n\n return ground_truth, predicted_class, f1, accuracy, cr", "sub_path": "evaluation.py", "file_name": "evaluation.py", "file_ext": "py", "file_size_in_byte": 1244, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "dataset.utils.get_label2idx", "line_number": 16, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 24, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 37, "usage_type": "call"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 38, "usage_type": "call"}, {"api_name": "sklearn.metrics.classification_report", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "374194", "text": "# This file manages the stripscan measurements and it is intended to be used as a plugin for the QTC software\n\nimport sys\n\nfrom scipy import stats\n\nsys.path.append('../modules')\nfrom ..VisaConnectWizard import *\nfrom ..utilities import *\nl = logging.getLogger(__name__)\n\nhelp = help_functions()\nvcw = VisaConnectWizard.VisaConnectWizard()\ntrans = transformation()\n\n\nclass stripscan_class:\n\n def __init__(self, main_class):\n \"\"\"\n This class takes only one parameter, the main class, in which all parameters must be prevalent. 
It further\n        starts the actual stripscan measurement, no further action needs to be taken\n\n        :param main_class:\n        \"\"\"\n        self.main = main_class\n        self.switching = self.main.switching\n        self.current_voltage = self.main.settings[\"Defaults\"][\"bias_voltage\"]\n        self.voltage_End = self.main.job_details[\"stripscan\"][\"EndVolt\"]\n        self.voltage_Start = self.main.job_details[\"stripscan\"][\"StartVolt\"]\n        self.voltage_steps = self.main.job_details[\"stripscan\"][\"Steps\"]\n        self.complience = self.main.job_details[\"stripscan\"][\"Complience\"]\n        self.bias_SMU = self.main.devices[\"IVSMU\"]\n        self.LCR_meter = self.main.devices[\"LCR\"]\n        self.SMU2 = self.main.devices[\"2410SMU\"]\n        self.discharge_SMU = self.main.devices[\"2410SMU\"]\n        self.discharge_switching = self.main.devices[\"temphum_controller\"]\n        self.elmeter = self.main.devices[\"Elmeter\"]\n        self.measurement_order = [\"Istrip\", \"Rpoly\", \"Idark\", \"Cac\", \"Cint\", \"Cback\", \"Idiel\", \"Rint\", \"frequencyscan\"]\n        self.units = [(\"Istrip\",\"current[A]\"), (\"Rpoly\", \"res[Ohm]\"),\n                      (\"Idiel\",\"current[A]\"), (\"Idark\",\"current[A]\"),\n                      (\"Rint\", \"res[Ohm]\"), (\"Cac\", \"cap[F]\"),\n                      (\"Cint\", \"cap[F]\"), (\"Cback\", \"cap[F]\")]\n        self.strips = self.main.total_strips # now the program knows the total number of strips\n        self.current_strip = self.main.main.default_dict[\"Defaults\"][\"current_strip\"] # Current pad position of the table\n        #self.T = self.main.main.default_dict[\"Defaults\"][\"T\"]\n        #self.V0 = self.main.main.default_dict[\"Defaults\"][\"V0\"]\n        self.height = self.main.main.default_dict[\"Defaults\"][\"height_movement\"]\n        self.samples = 3\n        self.last_istrip_pad = -1 # Number of the last pad on which an Istrip measurement was conducted, important for rpoly\n        self.T = self.main.main.default_dict[\"Defaults\"][\"trans_matrix\"]\n        self.V0 = self.main.main.default_dict[\"Defaults\"][\"V0\"]\n        self.job = self.main.job_details\n        self.sensor_pad_data = self.main.pad_data[self.job[\"Project\"]][self.job[\"Sensor\"]]\n        self.justlength = 24\n        self.rintslopes = [] # here all values from the Rint measurement are stored\n\n\n        # Preconfigure the electrometer for current measurements, zero correction etc.\n        commands = [(\"set_zero_check\", \"ON\"),\n                    (\"set_measure_current\", \"\"),\n                    (\"set_auto_current_range\", \"OFF\"),\n                    (\"set_current_range\", \"20e-12\"),\n                    (\"set_zero_correlation\", \"ON\"),\n                    (\"set_current_range\", \"20e-9\"),\n                    (\"set_auto_current_range\", \"ON\")\n                    ]\n        self.main.config_setup(self.elmeter, commands)\n\n        # Dispatch the actual measurement jobs\n        if \"stripscan\" in self.main.job_details:\n            if \"frequencyscan\" not in self.main.job_details[\"stripscan\"]:\n                if \"singlestrip\" in self.main.job_details[\"stripscan\"]:\n                    self.do_singlestrip(self.main.job_details[\"stripscan\"][\"singlestrip\"])\n                else:\n                    self.do_stripscan()\n\n        if \"stripscan\" in self.main.job_details:\n            if \"frequencyscan\" in self.main.job_details[\"stripscan\"]:\n                self.do_frequencyscan(self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"Measurements\"].keys(),\n                                      # gets the measurements\n                                      self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"Strip\"],\n                                      # the strip which should be measured\n                                      self.LCR_meter, 5, # device and sample size\n                                      self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"StartFreq\"],\n                                      self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"EndFreq\"],\n                                      self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"FreqSteps\"],\n                                      self.main.job_details[\"stripscan\"][\"frequencyscan\"][\"MinVolt\"])\n\n        # Ramp down the voltage after
stripscan\n\n        # Discharge the capacitors in the decouple box\n\n        # Switch to IV for correct biasing for ramp\n        self.switching.switch_to_measurement(\"IV\") # correct bias is applied\n\n        self.main.ramp_voltage(self.bias_SMU, \"set_voltage\", self.current_voltage, 0, self.voltage_steps, wait_time=0.3, complience=0.001)\n        self.main.change_value(self.bias_SMU,\"set_voltage\", \"0\")\n        self.main.change_value(self.bias_SMU, \"set_output\", \"0\")\n\n        if not self.main.capacitor_discharge(self.discharge_SMU, self.discharge_switching, \"set_terminal\", \"FRONT\", do_anyway=True):\n            self.stop_everything()\n\n        #self.save_rint_slopes()\n\n    def stop_everything(self):\n        \"\"\"Stops the measurement\n        A signal will be generated and sent to the event loops, which sets the statemachine to stop all measurements\"\"\"\n        order = {\"ABORT_MEASUREMENT\": True} # just for now\n        self.main.queue_to_main.put(order)\n\n    def save_rint_slopes(self):\n        # If a Rint measurement was done, save the data to a file\n        if self.main.save_data and self.rintslopes:\n\n            filepath = self.main.job_details[\"Filepath\"]\n            filename = \"rint_ramp_\" + self.main.job_details[\"Filename\"]\n\n            rintsettings = self.main.main.default_dict[\"Rint_MinMax\"]\n            header = self.main.job_details[\"Header\"]\n            header += \" #Ramp Voltage: \" + str(rintsettings[0]) + \" - \"+ str(rintsettings[1])+ \"V\\n\"\n            header += \" #Delta V: \" + str(int(rintsettings[2])) + \" V\\n\\n\"\n\n            unitsheader = \"Pad[#]\".ljust(self.justlength) + \"Voltage[V]\".ljust(self.justlength) + \"Current[A]\".ljust(self.justlength)\n\n            header += \"\".join([unitsheader for i in self.rintslopes])\n            header += \"\\n\"\n            file = self.main.create_data_file(header, filepath, filename)\n\n            # TODO: save the values to the file\n\n            # xvalue, rint, voltage_list, values_list ,slope, intercept, r_value, p_value, std_err\n            #for pad in self.rintslopes:\n            #    string_to_write += self.rintslopes\n\n\n\n            #self.main.write(file, string_to_write + \"\\n\")\n\n    def do_preparations_for_stripscan(self):\n        \"\"\"This function prepares the setup, like ramping the voltage and steady state check\n        \"\"\"\n        if self.main.save_data and \"frequencyscan\" not in self.main.job_details[\"stripscan\"]:\n            self.main.write(self.main.measurement_files[\"stripscan\"], self.main.job_details[\"stripscan\"].get(\"Additional Header\", \"\")) # TODO: pretty useless, an additional header to the file if necessary\n\n        # Add the additional params to the header\n        params_string = \"\"\n        for key, value in self.sensor_pad_data.get(\"additional_params\", {}).items():\n            params_string += \"# \" + str(key) + \": \" + str(value) + \"\\n\"\n        params_string += \"\\n\\n\"\n        self.main.write(self.main.measurement_files[\"stripscan\"], params_string)\n        # extend for additional files\n\n        # Switch to IV for correct biasing for ramp\n        if not self.switching.switch_to_measurement(\"IV\"):\n            self.stop_everything()\n\n        # Configure the setup, compliance and switch on the SMU\n        self.main.config_setup(self.bias_SMU, [(\"set_complience_current\", str(self.complience))])\n        self.main.change_value(self.bias_SMU, \"set_output\", \"1\")\n\n        # Move the table down while ramping\n        self.main.table.move_down(self.height)\n\n        # Ramps the voltage, if ramp voltage returns false something went wrong -> stop\n        if not self.main.ramp_voltage(self.bias_SMU, \"set_voltage\", self.voltage_Start, self.voltage_End, self.voltage_steps, wait_time = 1, complience=self.complience):\n            self.current_voltage = self.main.main.default_dict[\"Defaults\"][\"bias_voltage\"]\n            self.stop_everything()\n\n        # If
everything works, make a steady state check\n        else:\n            if self.main.steady_state_check(self.bias_SMU, max_slope = 1e-6, wait = 0, samples = 3, Rsq = 0.5, complience=self.complience): # Is a dynamic waiting time for the measurements\n                self.current_voltage = self.main.main.default_dict[\"Defaults\"][\"bias_voltage\"]\n                if self.main.check_complience(self.bias_SMU, self.complience): # if compliance is reached stop everything\n                    self.stop_everything()\n            else:\n                self.stop_everything()\n\n        # Move the table up again\n        self.main.table.move_up(self.height)\n\n    def do_singlestrip(self, job):\n        \"\"\"This function conducts the measurements defined for a single strip measurement\"\"\"\n        self.do_preparations_for_stripscan()\n\n        if not self.main.stop_measurement():\n            measurement_header = \"Pad\".ljust(self.justlength) # indicates the measurement\n            unit_header = \"#\".ljust(self.justlength) # indicates the units for the measurement\n\n            # Now add the new header to the file\n            if self.main.save_data:\n                self.main.write(self.main.measurement_files[\"stripscan\"], measurement_header + \"\\n\" + unit_header + \"\\n\")\n\n            # Discharge the capacitors in the decouple box\n            if not self.main.capacitor_discharge(self.discharge_SMU, self.discharge_switching, \"set_terminal\", \"FRONT\", do_anyway=True): self.stop_everything()\n\n            # Conduct the actual measurements and send them to the main\n            for measurement in self.measurement_order:\n                if measurement in job[\"Measurements\"] and not self.main.main.stop_measurement: # looks if measurement should be done\n\n                    # Now conduct the measurement\n                    self.main.table.move_to_strip(self.sensor_pad_data, int(job[\"Strip\"] - 1), trans, self.T, self.V0, self.height)\n                    value = getattr(self, \"do_\" + measurement)(job[\"Strip\"], self.samples, write_to_main = False)\n\n                    # Write this to the file\n                    if value and self.main.save_data:\n                        self.main.write(self.main.measurement_files[\"stripscan\"],str(float(value)).ljust(self.justlength)) # Writes the value to the file\n                    else:\n                        if self.main.save_data:\n                            self.main.write(self.main.measurement_files[\"stripscan\"],\n                                            \"--\".ljust(self.justlength)) # Writes nothing if no value is acquired\n\n                    # Write the data back to the GUI thread\n                    if value:\n                        self.main.queue_to_main.put({str(measurement): [int(job[\"Strip\"]), float(value)]})\n\n\n\n\n            # Write new line\n            if self.main.save_data:\n                self.main.write(self.main.measurement_files[\"stripscan\"], \"\\n\")\n\n    @help.timeit\n    def do_stripscan(self):\n        '''This function manages all stripscan measurements, also the frequency scan things.\n        It is meant to be used only once during the initialization of the class'''\n\n        self.do_preparations_for_stripscan()\n\n\n        if not self.main.stop_measurement():\n            # generate the list of strips per measurement which should be conducted and the units and so on for the header\n            measurement_header = \"Pad\".ljust(self.justlength) # indicates the measurement\n            unit_header = \"#\".ljust(self.justlength) # indicates the units for the measurement\n            for measurement in self.measurement_order:\n                if measurement in self.main.job_details[\"stripscan\"]: # looks if measurement should be done\n                    # Now generate a list of strips from the settings of the measurement\n                    min = self.main.job_details[\"stripscan\"][measurement][\"start_strip\"]\n                    max = self.main.job_details[\"stripscan\"][measurement][\"end_strip\"]\n                    delta = self.main.job_details[\"stripscan\"][measurement][\"measure_every\"]\n                    strip_list = self.main.ramp_value(min, max, delta)\n                    self.main.job_details[\"stripscan\"][measurement].update({\"strip_list\":
strip_list})\n                    unit_index = [x[0] for x in self.units].index(measurement) # gets me the index for the units\n                    unit_header += str(self.units[unit_index][1]).ljust(self.justlength)\n                    measurement_header += str(measurement).ljust(self.justlength)\n\n            # Now add humidity and temperature header\n            if self.main.job_details.get(\"environemnt\", True):\n                measurement_header += \"Temperature\".ljust(self.justlength)+\"Humidity\".ljust(self.justlength)\n                unit_header += \"degree[C]\".ljust(self.justlength)+\"rel. percent[rel%]\".ljust(self.justlength)\n\n            # Now add the new header to the file\n            if self.main.save_data:\n                self.main.write(self.main.measurement_files[\"stripscan\"], measurement_header + \"\\n\" + unit_header + \"\\n\")\n\n            # Discharge the capacitors in the decouple box\n            if not self.main.capacitor_discharge(self.discharge_SMU, self.discharge_switching, \"set_terminal\", \"FRONT\", do_anyway=True):\n                self.stop_everything()\n\n            # Do the actual measurements, first move, then conduct\n            # Todo: make it possible to measure from top to bottom\n            #results = []\n            for current_strip in range(1, int(self.strips)): # Loop over all strips\n                if not self.main.stop_measurement(): # Prevents empty entries from being written to the file after aborting the measurement\n                    self.current_strip = current_strip\n                    #results.append({}) # Adds an empty dict to the results for the bad strip detection\n                    start = time.time() # start timer for a strip measurement\n                    if self.main.save_data:\n                        self.main.write(self.main.measurement_files[\"stripscan\"], str(self.sensor_pad_data[\"data\"][current_strip-1][0]).ljust(self.justlength)) # writes the strip to the file\n                    for measurement in self.measurement_order:\n                        if measurement in self.main.job_details[\"stripscan\"] and not self.main.main.stop_measurement: # looks if measurement should be done\n\n                            # Now conduct the measurement\n                            # But first check if this strip should be measured with this specific measurement\n                            if current_strip in self.main.job_details[\"stripscan\"][measurement][\"strip_list\"]:\n                                self.main.table.move_to_strip(self.sensor_pad_data, self.current_strip-1, trans, self.T, self.V0, self.height)\n\n                                if not self.main.stop_measurement() and not self.main.check_complience(self.bias_SMU, self.complience):\n                                    value = getattr(self, \"do_\"+measurement)(current_strip, self.samples)\n\n                                    # In the end do a quick bad strip detection\n                                    badstrip = self.main.main.analysis.do_online_singlestrip_analysis((measurement, value))\n                                    if badstrip:\n                                        l.info(\"Badstrip detected at strip: \" + str(current_strip) + \" Error code: \" + str(badstrip))\n                                        self.main.queue_to_main.put({\"Thresholderror\": \"Badstrip detected at strip: \" + str(current_strip) + \" Error code: \" + str(badstrip)})\n                                        # Add the bad strip to the list of bad strips\n                                        if str(current_strip) in self.main.badstrip_dict:\n                                            self.main.badstrip_dict[str(current_strip)].update(badstrip)\n                                        else:\n                                            self.main.badstrip_dict[str(current_strip)] = badstrip\n                                        self.main.main.default_dict[\"Defaults\"][\"Bad_strips\"] += 1 # increment the counter\n\n\n                                # Write this to the file\n                                if value and self.main.save_data:\n                                    self.main.write(self.main.measurement_files[\"stripscan\"], str(float(value)).ljust(self.justlength)) # Writes the value to the file\n                                else:\n                                    if self.main.save_data:\n                                        self.main.write(self.main.measurement_files[\"stripscan\"], \"--\".ljust(self.justlength)) # Writes nothing if no value is acquired\n\n                    if not self.main.stop_measurement():\n                        # After all measurements are conducted write the environment variables to the file\n                        if
self.main.job_details.get(\"enviroment\", False):\n                            string_to_write = str(self.main.main.temperatur_history[-1]).ljust(self.justlength) + str(self.main.main.humidity_history[-1]).ljust(self.justlength)\n                        self.main.write(self.main.measurement_files[\"stripscan\"], string_to_write)\n\n                    # Write new line\n                    if self.main.save_data:\n                        self.main.write(self.main.measurement_files[\"stripscan\"], \"\\n\")\n\n                    # Do the bad strip detection\n\n\n                    if abs(float(start - time.time())) > 1.: # Rejects all measurements which are too short to be real measurements\n                        delta = float(self.main.main.default_dict[\"Defaults\"][\"strip_scan_time\"]) + abs(start - time.time())\n                        self.main.main.default_dict[\"Defaults\"][\"strip_scan_time\"] = str(delta / 2.) # updates the time for strip measurement\n\n\n    def do_frequencyscan(self, measurement_obj, strip, device_dict, samples, startfreq, endfreq, steps, voltage):\n        '''This function performs a frequency scan of the LCR meter and at each step it executes a LIST of measurements'''\n\n        self.do_preparations_for_stripscan()\n\n        if not self.main.stop_measurement():\n            # Generate frequency list\n            freq_list = self.main.ramp_value_log10(startfreq, endfreq, steps)\n\n            # Create a measurement file for the frequency scan, (per strip)\n            if self.main.save_data:\n                filepath = self.main.job_details[\"Filepath\"]\n                filename = \"fre_strip_\" + str(int(strip)) + \"_\" + self.main.job_details[\"Filename\"]\n\n                header = self.main.job_details[\"Header\"]\n                header += \" #AC Voltage: \" + str(voltage) + \"\\n\"\n                header += \" #Measured strip: \" + str(int(strip)) + \"\\n\\n\"\n                for meas in measurement_obj:\n                    func_name = str(meas)\n                    header += str(func_name) + \"\\t\\t\\t\\t\"\n                header += \"\\n\"\n\n                for meas in measurement_obj: # adds the units header\n                    header += \"frequency [Hz]\".ljust(self.justlength) + \"capacitance [F]\".ljust(self.justlength)\n                header += \"\\n\"\n\n                file = self.main.create_data_file(header, filepath, filename)\n\n            # Set the LCR amplitude voltage for measurement\n            self.main.change_value(self.LCR_meter, \"set_voltage\", str(voltage))\n\n            # Moves to strip\n            self.main.table.move_to_strip(self.sensor_pad_data, int(self.job[\"stripscan\"][\"frequencyscan\"][\"Strip\"])-1, trans, self.T, self.V0, self.height)\n\n            for freq in freq_list: # does the loop over the frequencies\n                if not self.main.stop_measurement(): # stops the loop if shutdown is necessary\n                    self.main.change_value(self.LCR_meter, \"set_frequency\", str(freq))\n                    value = []\n                    for i, meas in enumerate(measurement_obj):\n                        func_name = str(meas)\n                        value.append(getattr(self, \"do_\" + func_name)(freq, samples=samples, freqscan=True)) # calls the measurement\n                        # Append the data to the data array and send it to the main as frequency scan measurement\n                        if not self.main.stop_measurement():\n                            self.main.measurement_data[func_name + \"_scan\"][0] = np.append(self.main.measurement_data[func_name + \"_scan\"][0],[float(freq)])\n                            self.main.measurement_data[func_name + \"_scan\"][1] = np.append(self.main.measurement_data[func_name + \"_scan\"][1], [float(value[i])])\n                            self.main.queue_to_main.put({func_name + \"_scan\": [float(freq), float(value[i])]})\n\n                    if self.main.save_data:\n                        string_to_write = \"\"\n                        for val in value:\n                            string_to_write += str(freq).ljust(self.justlength) + str(val).ljust(self.justlength)\n                        self.main.write(file, string_to_write + \"\\n\")\n                else:\n                    break\n\n    def __do_simple_measurement(self, str_name, device, xvalue = -1, samples = 5, write_to_main = True):\n        '''\n        Does a simple measurement - really simple.
Only acquires some values and builds the mean of them\n\n        :param str_name: What measurement is conducted\n        :param device: Which device should be used\n        :param xvalue: Which strip we are on, -1 means arbitrary\n        :param samples: How many samples should be taken\n        :param write_to_main: Writes the value back to the main loop\n        :return: Returns the mean of all acquired values\n        '''\n        # Do some averaging over values\n        values = []\n        for i in range(samples): # takes samples\n            values.append(float(str(vcw.query(device, device[\"Read\"])).split(\",\")[0]))\n        value = sum(values) / len(values) # averaging\n\n        self.main.measurement_data[str(str_name)][0] = np.append(self.main.measurement_data[str(str_name)][0],[float(xvalue)])\n        self.main.measurement_data[str(str_name)][1] = np.append(self.main.measurement_data[str(str_name)][1],[float(value)])\n\n        if write_to_main: # Writes data to the main, or not\n            self.main.queue_to_main.put({str(str_name): [float(xvalue), float(value)]})\n\n        return value\n\n    def do_Rpoly(self, xvalue = -1, samples = 5, write_to_main = True):\n        '''Does the rpoly measurement'''\n        device_dict = self.SMU2\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Rpoly\"):\n                self.stop_everything()\n                return\n            voltage = -1.\n            self.main.config_setup(device_dict, [(\"set_source_voltage\", \"\"), (\"set_measure_current\", \"\"),(\"set_voltage\", voltage), (\"set_complience\", 90E-6), (\"set_output\", \"ON\")]) # config the 2410 for -1V bias on bias and DC pad\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=3, Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n            value = self.__do_simple_measurement(\"Rpoly\", device_dict, xvalue, samples, write_to_main=False) # This value is Istrip + Ipoly\n            # Now subtract the Istrip\n            if self.last_istrip_pad == xvalue:\n                # todo: take the correct value here\n                Istrip = self.main.measurement_data[\"Istrip\"][1][-1]\n            else: # If no Istrip then acquire a value\n                l.info(\"No Istrip value for Rpoly calculation could be found, Istrip measurement will be conducted on strip {}\".format(int(xvalue)))\n                Istrip = self.do_Istrip(xvalue, samples, False)\n            # Iges = Ipoly+Istrip\n            value = float(value)-float(Istrip) # corrected current value\n\n            rpoly = voltage/float(value)\n\n            if write_to_main: # Writes data to the main, or not\n                self.main.measurement_data[str(\"Rpoly\")][0] = np.append(self.main.measurement_data[str(\"Rpoly\")][0],[float(xvalue)])\n                self.main.measurement_data[str(\"Rpoly\")][1] = np.append(self.main.measurement_data[str(\"Rpoly\")][1],[float(rpoly)])\n                self.main.queue_to_main.put({str(\"Rpoly\"): [float(xvalue), float(rpoly)]})\n\n            self.main.config_setup(device_dict, [(\"set_output\", \"OFF\")])\n\n            return rpoly\n\n    def do_Rint(self, xvalue = -1, samples = 5, write_to_main = True):\n        '''Does the Rint measurement'''\n        device_dict = self.elmeter\n        voltage_device = self.SMU2\n        d = device_dict\n        rint = 0\n        config_commands = [(\"set_zero_check\", \"ON\"), (\"set_measure_current\", \"\"), (\"set_zero_check\", \"OFF\")]\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Rint\"):\n                self.stop_everything()\n                return\n            self.main.config_setup(voltage_device, [(\"set_voltage\", 0), (\"set_complience\", 50E-6)]) # config the 2410\n            self.main.config_setup(device_dict, config_commands) # config the elmeter\n            self.main.change_value(voltage_device, \"set_output\", \"ON\") # Sets the output of the device to on\n\n            rintsettings =
self.main.main.default_dict[\"Defaults\"][\"Rint_MinMax\"]\n            minvoltage = rintsettings[0]\n            maxvoltage = rintsettings[1]\n            steps = rintsettings[2]\n\n            voltage_list = self.main.ramp_value(minvoltage, maxvoltage, steps)\n\n            # Get to the first voltage and wait till steady state\n            self.main.change_value(voltage_device, \"set_voltage\", minvoltage)\n            if True or self.main.steady_state_check(device_dict, max_slope=1e-2, wait=0, samples=5, Rsq=0.3, check_complience=False): # Is a dynamic waiting time for the measurement\n                values_list = []\n                past_volts = []\n                for i, voltage in enumerate(voltage_list): # make all measurements for the Rint ramp\n                    if not self.main.stop_measurement():\n                        self.main.change_value(voltage_device, \"set_voltage\", voltage)\n                        value = self.__do_simple_measurement(\"Rint_scan\", device_dict, xvalue, samples, write_to_main=False)\n                        values_list.append(float(value))\n                        past_volts.append(float(voltage))\n                        self.main.queue_to_main.put({\"Rint_scan\": [past_volts, values_list]})\n\n                if not self.main.stop_measurement():\n                    # Now make the linear fit for the ramp\n                    slope, intercept, r_value, p_value, std_err = stats.linregress(voltage_list[2:], values_list[2:])\n                    # TODO: make some comparison if it is ok, write this to a separate file etc.\n                    rint = 1./slope\n                    self.rintslopes.append([xvalue, rint, voltage_list, values_list ,slope, intercept, r_value, p_value, std_err]) # so everything is saved in the end\n            else:\n                self.main.queue_to_main.put({\"MeasError\": \"Steady state could not be reached for the Rint measurement\"})\n\n            self.main.change_value(voltage_device, \"set_voltage\", 0)\n            self.main.change_value(voltage_device, \"set_output\", \"OFF\") # Sets the output of the device to off\n            self.main.config_setup(device_dict, [(\"set_zero_check\", \"ON\")]) # unconfig elmeter\n\n        if write_to_main: # Writes data to the main, or not\n            self.main.measurement_data[str(\"Rint\")][0] = np.append(self.main.measurement_data[str(\"Rint\")][0],[float(xvalue)])\n            self.main.measurement_data[str(\"Rint\")][1] = np.append(self.main.measurement_data[str(\"Rint\")][1],[float(rint)])\n            self.main.queue_to_main.put({str(\"Rint\"): [float(xvalue), float(rint)]})\n\n        return rint\n\n    def do_Idiel(self, xvalue = -1, samples = 5, write_to_main = True):\n        '''Does the idiel measurement'''\n        # TODO: Idiel wrong!!!
rewrite the switching to use the elmeter instead of SMU2!!!!\n        device_dict = self.elmeter\n        config_commands = [(\"set_zero_check\", \"ON\"), (\"set_measure_current\", \"\"), (\"set_zero_check\", \"OFF\")]\n\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Idiel\"):\n                self.stop_everything()\n                return\n            self.main.config_setup(device_dict, config_commands) # config the elmeter\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=2, Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n\n            value = self.__do_simple_measurement(\"Idiel\", device_dict, xvalue, samples,write_to_main=write_to_main)\n            self.main.config_setup(device_dict, [(\"set_zero_check\", \"ON\")]) # unconfig elmeter\n\n            return value\n\n    def do_Istrip(self, xvalue = -1, samples = 5, write_to_main = True):\n        '''Does the istrip measurement'''\n        device_dict = self.elmeter\n        d=device_dict # alias for faster writing\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Istrip\"):\n                self.stop_everything()\n                return\n            config_commands = [(\"set_zero_check\", \"ON\"), (\"set_measure_current\", \"\"), (\"set_zero_check\", \"OFF\")]\n            self.main.config_setup(device_dict, config_commands) # config the elmeter\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=2, Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n\n            value = self.__do_simple_measurement(\"Istrip\", device_dict, xvalue, samples, write_to_main=write_to_main)\n            self.main.config_setup(device_dict, [(\"set_zero_check\", \"ON\")]) # unconfig elmeter\n            self.last_istrip_pad = xvalue\n            return value\n\n    def do_Idark(self, xvalue = -1, samples = 5, write_to_main = True):\n        '''Does the idark measurement'''\n        device_dict = self.bias_SMU\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Idark\"):\n                self.stop_everything()\n                return\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=2, Rsq=0.5) # Is a dynamic waiting time for the measurement\n\n            value = self.__do_simple_measurement(\"Idark\", device_dict, xvalue, samples, write_to_main=write_to_main)\n            return value\n        else:\n            return None\n\n    def do_Cint(self, xvalue = -1, samples = 5, freqscan = False, write_to_main = True):\n        '''Does the cint measurement'''\n        device_dict = self.LCR_meter\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Cint\"):\n                self.stop_everything()\n                return\n            sleep(0.2) # Is needed due to some stray capacitances which corrupt the measurement\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=5,Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n            value = self.__do_simple_measurement(\"Cint\", device_dict, xvalue, samples, write_to_main=not freqscan)\n            return value\n\n    def do_CintAC(self, xvalue= -1, samples=5, freqscan=False, write_to_main=True):\n        '''Does the cint measurement on the AC strips'''\n        device_dict = self.LCR_meter\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"CintAC\"):\n                self.stop_everything()\n                return\n            sleep(0.2) # Needed to let stray capacitances settle
(Brandbox to LCR meter)\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=2, Rsq=0.5,\n                                         check_complience=False) # Is a dynamic waiting time for the measurement\n            value = self.__do_simple_measurement(\"CintAC\", device_dict, xvalue, samples, write_to_main=not freqscan)\n            return value\n\n    def do_Cac(self, xvalue = -1, samples = 5, freqscan = False, write_to_main = True):\n        '''Does the cac measurement'''\n        device_dict = self.LCR_meter\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Cac\"):\n                self.stop_everything()\n                return\n            sleep(0.2) # Is needed due to some stray capacitances which corrupt the measurement\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=5,Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n            value = self.__do_simple_measurement(\"Cac\", device_dict, xvalue, samples, write_to_main=not freqscan)\n            return value\n\n    def do_Cback(self, xvalue = -1, samples = 5, freqscan = False, write_to_main = True):\n        '''Does a capacitance measurement from one strip to the backside'''\n        device_dict = self.LCR_meter\n        if not self.main.stop_measurement():\n            if not self.switching.switch_to_measurement(\"Cback\"):\n                self.stop_everything()\n                return\n            sleep(0.2) # Is needed due to some stray capacitances which corrupt the measurement\n            self.main.steady_state_check(device_dict, max_slope=1e-6, wait=0, samples=5, Rsq=0.5, check_complience=False) # Is a dynamic waiting time for the measurement\n            value = self.__do_simple_measurement(\"Cback\", device_dict, xvalue, samples, write_to_main=not freqscan)\n            return value\n", "sub_path": "modules/measurement_plugins/stripscan.py", "file_name": "stripscan.py", "file_ext": "py", "file_size_in_byte": 34059, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 7, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "VisaConnectWizard.VisaConnectWizard", "line_number": 13, "usage_type": "call"}, {"api_name": "scipy.stats.linregress", "line_number": 469, "usage_type": "call"}, {"api_name": "scipy.stats", "line_number": 469, "usage_type": "name"}]} +{"seq_id": "458104462", "text": "import cv2\nimport numpy as np\n#import matplotlib.pyplot as plt\n\n\n\n\n# Face Detection\nclass FaceDetection:\n\n\t# Function to detect face using OpenCV\n\tdef detect_face(self, img):\n\t\t# Convert the test image to gray scale as opencv face detector expects gray images\n\t\tself.gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\t\t\n\t\t#===================================================================\n\t\t# Load OpenCV face detector:\n\t\t\n\t\t# LBP\n\t\t# face_cascade = cv2.CascadeClassifier('D:/Project/lbpcascade/lbpcascade_frontalface.xml')\n\n\t\t# Haar classifier\n\t\tself.face_cascade = cv2.CascadeClassifier('../haarcascades/haarcascade_frontalface_default.xml')\n\t\t\n\n\t\t# Detect multiscale images (some images may be closer to the camera than others) \n\t\t# Result : [] of faces ( type: np.ndarray: checked while debugging )\n\t\tself.faces = self.face_cascade.detectMultiScale(self.gray, scaleFactor=1.2, minNeighbors=5);\n\t\t\n\t\t# No faces detected: Return None\n\t\tif (len(self.faces) == 0):\n\t\t\treturn None, None, 0\n\n\t\t#===================================================================\n\t\t# Print the number of faces found\n\t\t# print('Faces found: ', len(self.faces))\n\t\t\n\t\t\n\t\t# Go over
the list of faces and draw them as rectangles on the original colored image\n\t\tfor (x, y, w, h) in self.faces:\n\t\t\tcv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\t\t\n\t\t# Convert image to RGB and show image\n\t\t# plt.imshow(convertToRGB(img))\n\t\t\n\t\t# Under the assumption that there will be only one face, extract the face area\n\t\tx, y, w, h = self.faces[0]\n\n\t\t# Return only the face part of the image\n\t\treturn self.gray[y:y+w, x:x+h], self.faces[0], len(self.faces)\n\n\n# FaceDetection().detect_face( cv2.imread('D:/Project/images/face2.jpg') )\n", "sub_path": "src/face_detection.py", "file_name": "face_detection.py", "file_ext": "py", "file_size_in_byte": 1681, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "cv2.cvtColor", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.CascadeClassifier", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 41, "usage_type": "call"}]} +{"seq_id": "276586102", "text": "#!/usr/bin/env python\n\"\"\"\ntest_nn_raw.py\n\nCopyright (c) 2017 Idiap Research Institute, http://www.idiap.ch/\nWritten by Weipeng He \n\nThis file is part of \"Neural Network based Sound Source Localization Models\".\n\n\"Neural Network based Sound Source Localization Models\" is free software:\nyou can redistribute it and/or modify it under the terms of the BSD 3-Clause\nLicense.\n\n\"Neural Network based Sound Source Localization Models\" is distributed in\nthe hope that it will be useful, but WITHOUT ANY WARRANTY; without even\nthe implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR\nPURPOSE. See the BSD 3-Clause License for more details.\n\"\"\"\n\nimport sys\nimport os\nimport argparse\n\nimport numpy as np\nimport torch\nfrom torch.autograd import Variable\n\nimport apkit\n\nsys.path.append('..')\nimport common\n\nimport archs\n\n_WAV_SUFFIX = '.wav'\n\ndef _predict(net, x, batch=100):\n    result = []\n    for i in xrange(0, len(x), batch):\n        j = min(i + batch, len(x))\n        y = Variable(torch.from_numpy(x[i:j])).cuda()\n        outputs = net(y)\n        result.append(outputs.cpu().data.numpy())\n    return np.concatenate(result)\n\ndef _pad_context(sig, offset, size, ctx_size, ahd_size):\n    nch, nsamples = sig.shape\n    c_start = max(0, offset - ctx_size)\n    n_ctx_pad = c_start - (offset - ctx_size)\n    c_end = min(nsamples, offset + size + ahd_size)\n    n_ahd_pad = (offset + size + ahd_size) - c_end\n    if n_ctx_pad > 0 or n_ahd_pad > 0:\n        res = np.concatenate((np.zeros((nch, n_ctx_pad)),\n                              sig[:,c_start:c_end],\n                              np.zeros((nch, n_ahd_pad))),\n                             axis=1)\n    else:\n        res = sig[:,c_start:c_end]\n    return res\n\ndef _load_feature(datafile, extract_ft, win_size, hop_size, n_ctx, n_ahd):\n    fs, sig = apkit.load_wav(datafile)\n    nch, nsamples = sig.shape\n    feat = np.array([extract_ft(fs, _pad_context(sig, o, win_size,\n                                                 n_ctx * win_size / 8,\n                                                 n_ahd * win_size / 8))\n                        for o in range(0, nsamples - win_size + 1, hop_size)])\n    return feat\n\ndef main(test_path, model, extract_ft, win_size, hop_size, n_ctx, n_ahd, method,\n         add_sns, batch):\n    # init net\n    net = archs.load_module(model)\n    if add_sns:\n        net = archs.AddConstantSns(net)\n    print >> sys.stderr, net\n    net.eval()\n    net.cuda()\n\n    # create result folder\n    rdir = os.path.join(test_path, 'results', method)\n    os.makedirs(rdir)\n\n    # load and save doas\n    doa = common.get_hmap_doas()\n    np.save(os.path.join(rdir, 'doas'), doa)\n\n    # iterate through all
data\n ddir = os.path.join(test_path, 'data')\n for f in os.listdir(ddir):\n if f.endswith(_WAV_SUFFIX):\n name = f[:-len(_WAV_SUFFIX)]\n print >> sys.stderr, name\n feat = _load_feature(os.path.join(ddir, f), extract_ft, win_size,\n hop_size, n_ctx, n_ahd)\n odtype = feat.dtype\n feat = feat.astype('float32', copy=False)\n if np.issubdtype(odtype, np.integer):\n feat /= abs(float(np.iinfo(odtype).min)) #normalize\n\n # prediction\n pred = _predict(net, feat, batch)\n np.save(os.path.join(rdir, name), np.moveaxis(pred, -1, 0))\n\n_FEATURES = {'stft' : common.FeatureSTFT}\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Test multi-source NN model '\n 'frow raw wav data')\n parser.add_argument('test', metavar='TEST_PATH', type=str,\n help='path to test data and feature')\n parser.add_argument('model', metavar='MODEL_PATH', type=str,\n help='path to trained model')\n parser.add_argument('-n', '--feature', metavar='FEATURE', type=str,\n required=True, choices=_FEATURES.keys(),\n help='feature extraction method')\n parser.add_argument('-m', '--method', metavar='METHOD', type=str,\n required=True, help='method name')\n parser.add_argument('-w', '--window-size', metavar='WIN_SIZE',\n type=int, default=2048,\n help='(default 2048) analysis window size')\n parser.add_argument('-o', '--hop-size', metavar='HOP_SIZE', type=int,\n default=1024,\n help='(default 1024) hop size, number of samples between windows')\n parser.add_argument('--wframes-per-block', metavar='N_WFRAME', type=int,\n default=4, help='(default 4) number of whole frames in on block')\n parser.add_argument('--context-frames', metavar='N_CTX', type=int,\n default=0, help='number of frames of context')\n parser.add_argument('--ahead-frames', metavar='N_AHD', type=int,\n default=0, help='number of frames to look ahead')\n parser.add_argument('--add-sns', action='store_true',\n help='add constant sns to output')\n parser.add_argument('--batch-size', metavar='BATCH', type=int,\n default=100, help='size of a batch')\n args = parser.parse_args()\n\n if args.feature == 'stft':\n frame_size = args.window_size / args.wframes_per_block\n extract_ft = _FEATURES[args.feature](frame_size, frame_size / 2,\n min_freq=100, max_freq=8000)\n\n main(args.test, args.model, extract_ft, args.window_size, args.hop_size,\n args.context_frames, args.ahead_frames, args.method, args.add_sns,\n args.batch_size)\n\n# -*- Mode: Python -*-\n# vi:si:et:sw=4:sts=4:ts=4\n\n", "sub_path": "test_nn_raw.py", "file_name": "test_nn_raw.py", "file_ext": "py", "file_size_in_byte": 5738, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "torch.autograd.Variable", "line_number": 41, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "apkit.load_wav", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 64, "usage_type": "call"}, {"api_name": "archs.load_module", "line_number": 73, "usage_type": "call"}, {"api_name": "archs.AddConstantSns", "line_number": 75, 
"usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 81, "usage_type": "call"}, {"api_name": "os.path", "line_number": 81, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 82, "usage_type": "call"}, {"api_name": "common.get_hmap_doas", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 86, "usage_type": "call"}, {"api_name": "os.path", "line_number": 86, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 90, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 93, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "numpy.issubdtype", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.integer", "line_number": 98, "usage_type": "attribute"}, {"api_name": "numpy.iinfo", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "numpy.moveaxis", "line_number": 103, "usage_type": "call"}, {"api_name": "common.FeatureSTFT", "line_number": 105, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 108, "usage_type": "call"}]} +{"seq_id": "148838537", "text": "# Copyright 2020 Alexis Lopez Zubieta\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the \"Software\"),\n# to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\nimport fnmatch\nimport logging\nimport os\nimport re\nimport stat\n\nfrom packaging import version\nfrom functools import reduce\n\nfrom appimagebuilder.common.file_test import is_elf\nfrom .base_helper import BaseHelper\nfrom appimagebuilder.commands.patchelf import PatchElf, PatchElfError\n\n\nclass InterpreterHandlerError(RuntimeError):\n pass\n\n\nclass Interpreter(BaseHelper):\n def __init__(self, app_dir, app_dir_cache):\n super().__init__(app_dir, app_dir_cache)\n\n self.priority = 100\n self.patch_elf = PatchElf()\n self.patch_elf.logger.level = logging.WARNING\n self.interpreters = {}\n\n def get_glibc_path(self) -> str:\n paths = self.app_dir_cache.find(\"*/libc.so.*\")\n if not paths:\n raise InterpreterHandlerError(\"Unable to find libc.so\")\n path = paths[0]\n\n logging.info(\"Libc found at: %s\" % os.path.relpath(path, self.app_dir))\n return path\n\n def configure(self, app_run):\n app_run.env[\"PATH\"] = \":\".join(\n [\"$APPDIR/%s:$PATH\" % path for path in self._get_bin_paths()]\n )\n\n app_run.env[\"APPDIR_LIBRARY_PATH\"] = \":\".join(\n [\"$APPDIR/%s\" % path for path in self._get_appdir_library_paths()]\n )\n app_run.env[\"LIBC_LIBRARY_PATH\"] = 
\":\".join(\n [\"$APPDIR/%s\" % path for path in self._get_libc_library_paths()]\n )\n\n glibc_path = self.get_glibc_path()\n glibc_version = self.guess_libc_version(glibc_path)\n app_run.env[\"APPDIR_LIBC_VERSION\"] = glibc_version\n\n self._patch_executables_interpreter(app_run.env[\"APPIMAGE_UUID\"])\n app_run.env[\"SYSTEM_INTERP\"] = \":\".join(self.interpreters.keys())\n\n def _get_appdir_library_paths(self):\n paths = self.app_dir_cache.find(\"*\", attrs=[\"is_lib\"])\n # only dir names are relevant\n paths = set(os.path.dirname(path) for path in paths)\n\n # make all paths relative to app_dir\n paths = [os.path.relpath(path, self.app_dir) for path in paths]\n\n # exclude libc partition paths\n paths = [path for path in paths if not path.startswith(\"opt/libc\")]\n\n # exclude qt5 plugins paths\n paths = [path for path in paths if \"qt5/plugins\" not in path]\n\n # exclude perl paths\n paths = [path for path in paths if \"/perl/\" not in path]\n paths = [path for path in paths if \"/perl-base/\" not in path]\n\n return paths\n\n def _get_libc_library_paths(self):\n paths = self.app_dir_cache.find(\"*\", attrs=[\"is_lib\"])\n\n # only dir names are relevant\n paths = set(os.path.dirname(path) for path in paths)\n\n # make all paths relative to app_dir\n paths = [os.path.relpath(path, self.app_dir) for path in paths]\n\n # exclude libc partition paths\n paths = [path for path in paths if path.startswith(\"opt/libc\")]\n\n return paths\n\n def _load_ld_conf_file(self, file):\n paths = set()\n with open(file, \"r\") as fin:\n for line in fin.readlines():\n if line.startswith(\"/\"):\n paths.add(line.strip())\n return paths\n\n def _set_execution_permissions(self, path):\n os.chmod(\n path,\n stat.S_IRWXU | stat.S_IXGRP | stat.S_IRGRP | stat.S_IXOTH | stat.S_IROTH,\n )\n\n @staticmethod\n def guess_libc_version(loader_path):\n glib_version_re = re.compile(r\"GLIBC_(?P\\d+\\.\\d+\\.?\\d*)\")\n with open(loader_path, \"rb\") as f:\n content = str(f.read())\n glibc_version_strings = glib_version_re.findall(content)\n if glibc_version_strings:\n glibc_version_strings = map(version.parse, glibc_version_strings)\n max_glibc_version = reduce(\n (lambda x, y: max(x, y)), glibc_version_strings\n )\n return str(max_glibc_version)\n else:\n raise InterpreterHandlerError(\"Unable to determine glibc version\")\n\n def _patch_executables_interpreter(self, uuid):\n for bin in self.app_dir_cache.find(\"*\", attrs=[\"pt_interp\"]):\n self._set_interpreter(bin, uuid)\n\n def _set_interpreter(self, file, uuid):\n original_interpreter = self.app_dir_cache.cache[file][\"pt_interp\"]\n if original_interpreter.startswith(\"/tmp/appimage-\"):\n # skip, the binary has been patched already\n return\n try:\n patchelf_command = PatchElf()\n patchelf_command.log_stderr = False\n patchelf_command.log_stdout = False\n\n apprun_interpreter = self._gen_interpreter_link_path(\n original_interpreter, uuid\n )\n if original_interpreter and original_interpreter != apprun_interpreter:\n # only include interpreters from standard paths\n if original_interpreter.startswith(\"/lib\"):\n self.interpreters[original_interpreter] = apprun_interpreter\n logging.info(\n \"Replacing PT_INTERP on: %s\" % os.path.relpath(file, self.app_dir)\n )\n logging.debug(\n '\\t\"%s\" => \"%s\"' % (original_interpreter, apprun_interpreter)\n )\n patchelf_command.set_interpreter(file, apprun_interpreter)\n self.app_dir_cache.cache[file][\"pt_interp\"] = apprun_interpreter\n except PatchElfError:\n pass\n\n @staticmethod\n def 
_gen_interpreter_link_path(real_interpreter, uuid):\n return \"/tmp/appimage-%s-%s\" % (uuid, os.path.basename(real_interpreter))\n\n def _get_bin_paths(self):\n paths = self.app_dir_cache.find(\"*\", attrs=[\"is_bin\"])\n # only dir names are relevant\n paths = set(os.path.dirname(path) for path in paths)\n\n # make all paths relative to app_dir\n paths = [os.path.relpath(path, self.app_dir) for path in paths]\n\n # exclude libc partition paths\n paths = [path for path in paths if not path.startswith(\"opt/libc\")]\n\n return paths\n", "sub_path": "appimagebuilder/app_dir/runtime/helpers/interpreter.py", "file_name": "interpreter.py", "file_ext": "py", "file_size_in_byte": 6673, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "base_helper.BaseHelper", "line_number": 30, "usage_type": "name"}, {"api_name": "appimagebuilder.commands.patchelf.PatchElf", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.WARNING", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 91, "usage_type": "call"}, {"api_name": "os.path", "line_number": 91, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 94, "usage_type": "call"}, {"api_name": "os.path", "line_number": 94, "usage_type": "attribute"}, {"api_name": "os.chmod", "line_number": 110, "usage_type": "call"}, {"api_name": "stat.S_IRWXU", "line_number": 112, "usage_type": "attribute"}, {"api_name": "stat.S_IXGRP", "line_number": 112, "usage_type": "attribute"}, {"api_name": "stat.S_IRGRP", "line_number": 112, "usage_type": "attribute"}, {"api_name": "stat.S_IXOTH", "line_number": 112, "usage_type": "attribute"}, {"api_name": "stat.S_IROTH", "line_number": 112, "usage_type": "attribute"}, {"api_name": "re.compile", "line_number": 117, "usage_type": "call"}, {"api_name": "packaging.version.parse", "line_number": 122, "usage_type": "attribute"}, {"api_name": "packaging.version", "line_number": 122, "usage_type": "name"}, {"api_name": "functools.reduce", "line_number": 123, "usage_type": "call"}, {"api_name": "appimagebuilder.commands.patchelf.PatchElf", "line_number": 140, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 151, "usage_type": "call"}, {"api_name": "os.path.relpath", "line_number": 152, "usage_type": "call"}, {"api_name": "os.path", "line_number": 152, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 154, "usage_type": "call"}, {"api_name": "appimagebuilder.commands.patchelf.PatchElfError", "line_number": 159, "usage_type": "name"}, {"api_name": "os.path.basename", "line_number": 164, "usage_type": "call"}, {"api_name": "os.path", "line_number": 164, "usage_type": "attribute"}, {"api_name": "os.path.dirname", "line_number": 169, "usage_type": "call"}, {"api_name": "os.path", "line_number": 169, "usage_type": "attribute"}, {"api_name": "os.path.relpath", "line_number": 172, "usage_type": "call"}, {"api_name": "os.path", "line_number": 172, "usage_type": 
"attribute"}]} +{"seq_id": "125151599", "text": "# -*- coding: utf-8 -*-\nfrom scrapy.downloadermiddlewares.useragent import UserAgentMiddleware\nimport random\n\n\nclass RandomUserAgentMiddleware(UserAgentMiddleware):\n user_agent_list = [\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'\n ]\n\n def process_request(self, request, spider):\n user_agent = random.choice(self.user_agent_list)\n if user_agent:\n request.headers.setdefault('User-Agent', user_agent)\n", "sub_path": "doubanmovie/doubanmovie/middlewares/UserAgentMiddleware.py", "file_name": "UserAgentMiddleware.py", "file_ext": "py", "file_size_in_byte": 509, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.downloadermiddlewares.useragent.UserAgentMiddleware", "line_number": 6, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 12, "usage_type": "call"}]} +{"seq_id": "164582585", "text": "from django.shortcuts import render, get_object_or_404\nfrom .models import Post, Comment\nfrom django.core.paginator import Paginator, EmptyPage,PageNotAnInteger\nfrom django.core.mail import send_mail\nfrom django.views.generic import ListView\n\nfrom django.db.models import Count\nfrom .forms import EmailPostForm, CommentForm, SearchForm\nfrom taggit.models import Tag\nfrom haystack.query import SearchQuerySet\n\n\nclass PostListView(ListView):\n queryset = Post.published.all()\n context_object_name = 'posts'\n paginate_by = 3\n template_name = 'blog/post/list.html'\n\n##############################################\n# Old version of post_list\n##############################################\n\n# def post_list(request):\n# posts = Post.published.all()\n# return render(request,'blog/post/list.html',{'posts': posts})\n\n##############################################\n# New version of post_list\n##############################################\n\ndef post_list(request, tag_slug=None):\n object_list = Post.published.all()\n tag = None\n\n if tag_slug:\n tag = get_object_or_404(Tag, slug=tag_slug)\n object_list = object_list.filter(tags__in=[tag])\n\n paginator = Paginator(object_list, 3) # 3 posts in each page\n page = request.GET.get('page')\n try:\n posts = paginator.page(page)\n except PageNotAnInteger:\n # If page is not an integer deliver the first page\n posts = paginator.page(1)\n except EmptyPage:\n # If page is out of range deliver last page of results\n posts = paginator.page(paginator.num_pages)\n return render(request,'blog/post/list.html',{'page': page,\n 'posts': posts,\n 'tag': tag})\n\n\ndef post_detail(request, year, month, day, post):\n post = get_object_or_404(Post, slug=post,\n status='published',\n publish__year=year,\n publish__month=month,\n publish__day=day)\n # List of active comments for this post\n comments = post.comments.filter(active=True)\n # new_comment = None\n if request.method == 'POST':\n # A comment was posted\n comment_form = CommentForm(data=request.POST)\n if comment_form.is_valid():\n # Create Comment object but don't save to database\n new_comment = comment_form.save(commit=False)\n # Assign the current post to the comment\n new_comment.post = post\n # Save the comment to the database\n new_comment.save()\n else:\n comment_form = CommentForm()\n\n # List of similar posts\n post_tags_ids = post.tags.values_list('id', flat=True)\n similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)\n similar_posts = 
similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags','-publish')[:4]\n\n return render(request,'blog/post/detail.html',{'post': post,\n 'comments': comments,\n 'comment_form': comment_form,\n 'similar_posts': similar_posts})\n\n##############################################\n## From Chapter 2\n##############################################\n\ndef post_share(request, post_id):\n # Retrieve post by id\n post = get_object_or_404(Post, id=post_id, status='published')\n sent = False\n if request.method == 'POST':\n # Form was submitted\n form = EmailPostForm(request.POST) ## request.POST contains the submitted data\n if form.is_valid():\n # Form fields passed validation\n cd = form.cleaned_data\n # ... send email\n post_url = request.build_absolute_uri(post.get_absolute_url())\n subject = '{} ({}) recommends you reading \"{}\"'.format(cd['name'], cd['email'], post.title)\n message = 'Read \"{}\" at {}\\n\\n{}\\'s comments: {}'.format(post.title, post_url, cd['name'], cd['comments'])\n send_mail(subject, message, 'admin@myblog.com',[cd['to']])\n sent = True\n else:\n form = EmailPostForm() ## Displays an empty form\n return render(request, 'blog/post/share.html', {'post': post,\n 'form': form,\n 'sent': sent})\n\n\n# Indenting is important here\n# If return is at same level as if form.is_valid(): then no request at all when no query in the GET\n # Error message: The view blog.views.post_search didn't return an HttpResponse object. It returned None instead.\n# If return is at same level as if 'query' in request.GET : then when no query, there's no cd nor results either\n # Error message: local variable 'cd' referenced before assignment\n# Solution:\n # 1. Created another possible output to exit the IF: just render the form return render(request, 'blog/post/search.html', {'form': form,})\n # Solution from Stack overflow: https://stackoverflow.com/questions/37482461/unboundlocalerror-at-blog-search-local-variable-cd-referenced-before-assignm\n # 2. 
Initiated cd and results variables (https://github.com/PacktPublishing/Django-2-by-Example/blob/master/Chapter03/mysite/blog/views.py)\n\n\ndef post_search(request):\n form = SearchForm()\n cd = None\n results = []\n total_results = 0\n if 'query' in request.GET:\n form = SearchForm(request.GET)\n if form.is_valid():\n cd = form.cleaned_data\n results = SearchQuerySet().models(Post).filter(content=cd['query']).load_all()\n # count total results\n total_results = results.count()\n return render(request,\n 'blog/post/search.html',\n {'form': form,\n 'cd': cd,\n 'results': results,\n 'total_results': total_results})\n #return render(request, 'blog/post/search.html', {'form': form,})\n", "sub_path": "mysite/blog/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 6246, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.views.generic.ListView", "line_number": 13, "usage_type": "name"}, {"api_name": "models.Post.published.all", "line_number": 14, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 14, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 14, "usage_type": "name"}, {"api_name": "models.Post.published.all", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 36, "usage_type": "call"}, {"api_name": "taggit.models.Tag", "line_number": 36, "usage_type": "argument"}, {"api_name": "django.core.paginator.Paginator", "line_number": 39, "usage_type": "call"}, {"api_name": "django.core.paginator.PageNotAnInteger", "line_number": 43, "usage_type": "name"}, {"api_name": "django.core.paginator.EmptyPage", "line_number": 46, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 49, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 55, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 55, "usage_type": "argument"}, {"api_name": "forms.CommentForm", "line_number": 65, "usage_type": "call"}, {"api_name": "forms.CommentForm", "line_number": 74, "usage_type": "call"}, {"api_name": "models.Post.published.filter", "line_number": 78, "usage_type": "call"}, {"api_name": "models.Post.published", "line_number": 78, "usage_type": "attribute"}, {"api_name": "models.Post", "line_number": 78, "usage_type": "name"}, {"api_name": "django.db.models.Count", "line_number": 79, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 81, "usage_type": "call"}, {"api_name": "django.shortcuts.get_object_or_404", "line_number": 92, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 92, "usage_type": "argument"}, {"api_name": "forms.EmailPostForm", "line_number": 96, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 104, "usage_type": "call"}, {"api_name": "forms.EmailPostForm", "line_number": 107, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 108, "usage_type": "call"}, {"api_name": "forms.SearchForm", "line_number": 125, "usage_type": "call"}, {"api_name": "forms.SearchForm", "line_number": 130, "usage_type": "call"}, {"api_name": "models.Post", "line_number": 133, "usage_type": "argument"}, {"api_name": "haystack.query.SearchQuerySet", "line_number": 133, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render", "line_number": 136, "usage_type": "call"}]} +{"seq_id": "102755548", "text": "import pandas as pd\nfrom sklearn.preprocessing import LabelEncoder\n\n\nclass Preprocess():\n\n def __init__(self):\n print(\"Preprocess object created\")\n\n def fillna(self, data, fill_strategies):\n for column, strategy in fill_strategies.items():\n if strategy == 'None':\n data[column] = data[column].fillna('None')\n elif strategy == 'Zero':\n data[column] = data[column].fillna(0)\n elif strategy == 'Mode':\n data[column] = data[column].fillna(data[column].mode()[0])\n elif strategy == 'Mean':\n data[column] = data[column].fillna(data[column].mean())\n elif strategy == 'Median':\n data[column] = data[column].fillna(data[column].median())\n else:\n print(\"{}: There is no such thing as preprocess strategy\".format(strategy))\n\n return data\n\n def drop(self, data, drop_strategies):\n for column, strategy in drop_strategies.items():\n data = data.drop(labels=[column], axis=strategy)\n\n return data\n\n def feature_engineering(self, data, engineering_strategies=1):\n if engineering_strategies == 1:\n return self._feature_engineering1(data)\n\n return data\n\n def _feature_engineering1(self, data):\n\n data = self._base_feature_engineering(data)\n\n data['FareBin'] = pd.qcut(data['Fare'], 4)\n\n data['AgeBin'] = pd.cut(data['Age'].astype(int), 5)\n\n drop_strategy = {'Age': 1, # 1 indicate axis 1(column)\n 'Name': 1,\n 'Fare': 1}\n data = self.drop(data, drop_strategy)\n\n return data\n\n def _base_feature_engineering(self, data):\n data['FamilySize'] = data['SibSp'] + data['Parch'] + 1\n\n data['IsAlone'] = 1\n data.loc[(data['FamilySize'] > 1), 'IsAlone'] = 0\n\n data['Title'] = data['Name'].str.split(\", \", expand=True)[1].str.split('.', expand=True)[0]\n min_length = 10\n title_names = (data['Title'].value_counts() < min_length)\n data['Title'] = data['Title'].apply(lambda x: 'Misc' if title_names.loc[x] is True else x)\n\n return data\n\n def _label_encoder(self, data):\n labelEncoder = LabelEncoder()\n for column in data.columns.values:\n if 'int64' == data[column].dtype or 'float64' == data[column].dtype or 'int64' == data[column].dtype:\n continue\n labelEncoder.fit(data[column])\n data[column] = labelEncoder.transform(data[column])\n return data\n\n def _get_dummies(self, data, prefered_columns=None):\n\n if prefered_columns is None:\n columns=data.columns.values\n non_dummies = None\n else:\n non_dummies = [col for col in data.columns.values if col not in prefered_columns]\n\n columns = prefered_columns\n\n dummies_data = [pd.get_dummies(data[col], prefix=col) for col in columns]\n\n if non_dummies is not None:\n for non_dummy in non_dummies:\n dummies_data.append(data[non_dummy])\n\n return pd.concat(dummies_data, axis=1)\n\nclass PreprocessStrategy():\n def __init__(self):\n self.data = None\n self._preprocessor = Preprocess()\n\n def strategy(self, data, strategy_type=\"strategy1\"):\n self.data = data\n if strategy_type == 'strategy1':\n self._strategy1()\n elif strategy_type == 'strategy2':\n self._strategy2()\n\n return self.data\n\n def _base_strategy(self):\n drop_strategy = {'PassengerId': 1, # 1 indicate axis 1(column)\n 'Cabin': 1,\n 'Ticket': 1}\n self.data = self._preprocessor.drop(self.data, drop_strategy)\n\n fill_strategy = {'Age': 'Median',\n 'Fare': 'Median',\n 'Embarked': 'Mode'}\n self.data = self._preprocessor.fillna(self.data, fill_strategy)\n\n self.data = self._preprocessor.feature_engineering(self.data, 1)\n\n self.data = 
self._preprocessor._label_encoder(self.data)\n\n def _strategy1(self):\n self._base_strategy()\n\n self.data = self._preprocessor._get_dummies(self.data,\n prefered_columns=['Pclass', 'Sex', 'Parch',\n 'Embarked', 'Title', 'IsAlone'])\n\n def _strategy2(self):\n self._base_strategy()\n\n self.data=self._preprocessor._get_dummies(self.data,\n prefered_columns=None) # None mean that all feature will be dummied", "sub_path": "Preprocess.py", "file_name": "Preprocess.py", "file_ext": "py", "file_size_in_byte": 4679, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pandas.qcut", "line_number": 43, "usage_type": "call"}, {"api_name": "pandas.cut", "line_number": 45, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.LabelEncoder", "line_number": 68, "usage_type": "call"}, {"api_name": "pandas.get_dummies", "line_number": 86, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "71702834", "text": "import json\r\nimport requests\r\n\r\n#HUE\r\n#url = \"192.168.3.22\"\r\n#key = \"f27eacc23cb5d0f2313b6a4288e14a7\"\r\n\r\n#RaspBee\r\nurl = \"192.168.3.26\"\r\nkey = \"8629FF708F\"\r\n\r\naddress = 'http://'+url+'/api/'+key\r\nprint (address)\r\n\r\nlight = '1';\r\ninp = ''\r\n\r\nwhile inp != 'q':\r\n\tinp = input('key: ')\r\n\t\r\n\tif inp == 'y':\r\n\t\t# PUT light off\r\n\t\tresponse = requests.put(address+'/lights/'+light+'/state', data = '{\"on\" : false}')\r\n\telif inp == 'x':\r\n\t\t# PUT light on and change color\r\n\t\tparams = '{\"on\" : true, \"sat\":254, \"bri\":254,\"hue\":10000}'\r\n\t\tresponse = requests.put(address+'/lights/'+light+'/state', data = params)\r\n\t\tprint (response.status_code)\r\n\t\tprint (response.content)\r\n\telif inp == 'i':\r\n\t\t# GET lights\r\n\t\tresponse = requests.get(address+'/lights')\r\n\t\tprint (response.json())\r\n", "sub_path": "rest.py", "file_name": "rest.py", "file_ext": "py", "file_size_in_byte": 770, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.put", "line_number": 23, "usage_type": "call"}, {"api_name": "requests.put", "line_number": 27, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "530972304", "text": "### * Description\n\n# Entry points for the command line scripts\n\nDESCRIPTION = ( \"Parse gene data present in GenBank records to produce a gene \"\n\"table containing summary information about each gene, such as the record of \"\n\"origin, the coding sequence, the protein sequence, hash values for the \"\n\"sequences, location, unique gene identifier, ... This utility can also merge \"\n\"similar sequences before running blastp, in order to reduce the run time when \"\n\"many very similar sequences are present. 
The gene table can be used by tools \"\n\"for gene clustering for example.\")\n\n### * Wishlist\n\n# pygenes parse genbank-records/* --genes gene.table --records records.table\n# pygenes hash genes.table --peptideField --hash md5 -o genes.table\n# pygenes mergePeptides genes.table --maxDissimilarity 0.05 --mapping mergedPep.mapping\n\n### * Set up\n\n### ** Import\n\nimport sys\nimport argparse\nimport shutil\nimport hashlib\nimport os\nimport collections\nimport sqlite3 as sql\nfrom Bio import SeqIO\nimport pygenes as pygenes\n\n### * Parser\n\ndef makeParser() :\n \"\"\"Prepare the parser\n\n Returns:\n ArgumentParser: An argument parser\n\n \"\"\"\n parser = argparse.ArgumentParser(description = DESCRIPTION)\n subparsers = parser.add_subparsers()\n### ** Parse GenBank records\n sp_parseGB = subparsers.add_parser(\"parseGB\",\n description = \"Parse GenBank records into \"\n \"a gene table.\",\n help = \"Parse GenBank records to a table\")\n sp_parseGB.add_argument(\"gbRecords\", metavar = \"GB_RECORD\", nargs = \"+\",\n type = str,\n help = \"GenBank records\")\n sp_parseGB.add_argument(\"-g\", \"--genes\", metavar = \"FILE\", type = str,\n help = \"Output file for gene table\")\n sp_parseGB.add_argument(\"-r\", \"--records\", metavar = \"FILE\", type = str,\n help = \"Output file for record table\")\n sp_parseGB.set_defaults(action = \"parseGB\")\n### ** Parse Ensembl records\n sp_parseEnsembl = subparsers.add_parser(\"parseEnsembl\",\n description = \"Parse Ensembl records into \"\n \"a gene table.\",\n help = \"Parse Ensembl records to a table\")\n sp_parseEnsembl.add_argument(\"ensemblRecords\",\n metavar = \"ENSEMBL_RECORD\", nargs = \"+\",\n type = str,\n help = \"Ensembl records\")\n sp_parseEnsembl.add_argument(\"-g\", \"--genes\", metavar = \"FILE\", type = str,\n help = \"Output file for gene table\")\n sp_parseEnsembl.add_argument(\"-r\", \"--records\", metavar = \"FILE\", type = str,\n help = \"Output file for record table\")\n sp_parseEnsembl.set_defaults(action = \"parseEnsembl\")\n### ** Parse EMBL records\n sp_parseEMBL = subparsers.add_parser(\"parseEMBL\",\n description = \"Parse EMBL records into \"\n \"a gene table.\",\n help = \"Parse EMBL records to a table\")\n sp_parseEMBL.add_argument(\"emblRecords\", metavar = \"EMBL_RECORD\", nargs = \"+\",\n type = str,\n help = \"EMBL records\")\n sp_parseEMBL.add_argument(\"-g\", \"--genes\", metavar = \"FILE\", type = str,\n help = \"Output file for gene table\")\n sp_parseEMBL.add_argument(\"-r\", \"--records\", metavar = \"FILE\", type = str,\n help = \"Output file for record table\")\n sp_parseEMBL.set_defaults(action = \"parseEMBL\")\n### ** Build SQL Genomes, Cds and Records tables from EMBL files\n sp_SQL_EMBL = subparsers.add_parser(\"parseEMBLtoSQL\",\n description = \"Parse EMBL files into an SQLite \"\n \"database\",\n help = \"Parse EMBL files into a new SQLite \"\n \"database\")\n sp_SQL_EMBL.add_argument(\"-o\", \"--outDb\", type = str,\n help = \"Output database (tables will be \"\n \"deleted in the database if it already exists)\")\n sp_SQL_EMBL.add_argument(\"emblFiles\", metavar = \"EMBL_FILE\", nargs = \"+\",\n type = str,\n help = \"EMBL file(s) (not compressed)\")\n sp_SQL_EMBL.set_defaults(action = \"SQL_EMBL\")\n### ** # Calculate hash digests\n # sp_hash = subparsers.add_parser(\"hash\",\n # help = \"Produce hash digest (e.g. 
for peptides)\")\n # sp_hash.add_argument(\"input\", metavar = \"INPUT_TABLE\", type = str,\n # help = \"Input table\")\n # sp_hash.add_argument(\"-o\", \"--output\", metavar = \"FILE\", type = str,\n # help = \"Output file for gene table\")\n # sp_hash.add_argument(\"--hash\", metavar = \"HASH_ALGORITHM\",\n # choices = [\"md5\", \"sha1\", \"sha224\", \"sha256\", \"sha384\",\n # \"sha512\"],\n # default = \"md5\",\n # help = \"Hash algorithm to use for unique sequence signature \"\n # \"(default: md5)\")\n # sp_hash.set_defaults(action = \"hash\")\n### ** Merge peptides (from gene table)\n sp_mergePep = subparsers.add_parser(\"mergePep\",\n help = \"Merge similar peptides\")\n sp_mergePep.add_argument(\"input\", metavar = \"INPUT_TABLE\", type = str,\n help = \"Input table\")\n sp_mergePep.add_argument(\"-o\", \"--output\", metavar = \"FILE\", type = str,\n help = \"Output file for gene table\")\n sp_mergePep.add_argument(\"-d\", \"--dissim\", metavar = \"FLOAT\", type = float,\n help = \"Maximum dissimilarity for merging \"\n \"(between 0 and 1)\")\n sp_mergePep.add_argument(\"-f\", \"--fasta\", metavar = \"FILE\", type = str,\n help = \"Output fasta file for merged peptides\")\n sp_mergePep.set_defaults(action = \"mergePep\")\n### ** Extract columns\n sp_extract = subparsers.add_parser(\"extract\",\n help = \"Extract some columns from a \"\n \"table. Output is sent to stdout.\")\n sp_extract.add_argument(\"inputTable\", metavar = \"TABLE\", type = str,\n help = \"Input table file\")\n sp_extract.add_argument(\"columns\", metavar = \"COLNAME\", type = str,\n nargs = \"+\",\n help = \"Name of the column(s) to extract\")\n sp_extract.add_argument(\"-t\", \"--type\", choices = [\"gene\", \"record\"],\n help = \"Table type (default: gene)\",\n default = \"gene\")\n sp_extract.set_defaults(action = \"extract\")\n### ** Count unique sequences in a fasta file\n sp_countUniqFasta = subparsers.add_parser(\"count\",\n help = \"Count unique sequences in \"\n \"a fasta file (using hash)\")\n sp_countUniqFasta.add_argument(\"inputFiles\", metavar = \"FASTA_FILE\",\n type = str, nargs = \"+\",\n help = \"Input fasta file(s)\")\n sp_countUniqFasta.set_defaults(action = \"count\")\n### ** Return parser\n return parser\n\n### * Mains\n\n### ** Main entry point (dispatch)\n\ndef main(args = None, stdout = None, stderr = None) :\n \"\"\"Main entry point\n\n Args:\n args (namespace): Namespace with script arguments, parse the command \n line arguments if None\n stdout (file): Writable stdout stream (if None, use `sys.stdout`)\n stderr (file): Writable stderr stream (if None, use `sys.stderr`)\n\n \"\"\"\n if args is None :\n parser = makeParser()\n args = parser.parse_args()\n if stdout is None :\n stdout = sys.stdout\n if stderr is None :\n stderr = sys.stderr\n dispatch = dict()\n dispatch[\"parseGB\"] = main_parseGB\n dispatch[\"parseEnsembl\"] = main_parseEnsembl\n dispatch[\"parseEMBL\"] = main_parseEMBL\n dispatch[\"SQL_EMBL\"] = main_SQL_EMBL\n dispatch[\"hash\"] = main_hash\n dispatch[\"mergePep\"] = main_mergePep\n dispatch[\"extract\"] = main_extract\n dispatch[\"count\"] = main_count\n dispatch[args.action](args, stdout, stderr)\n \n### ** Main parseGB\n\ndef main_parseGB(args, stdout, stderr) :\n if args.genes is None and args.records is None :\n msg = \"You should use at least one of -g or -r. 
Use -h for help.\\n\"\n stdout.write(msg)\n sys.exit(0)\n if args.genes is not None :\n geneTable = pygenes.GeneTable()\n geneTable.parseGenBankRecord(args.gbRecords)\n stderr.write(\"Calculating geneId hash\\n\")\n geneTable.writeTable(args.genes)\n if args.records is not None :\n stderr.write(\"Getting record items\\n\")\n recordTable = pygenes.RecordTable()\n for r in args.gbRecords :\n recordTable.addGenBankRecord(r)\n stderr.write(\"Writing record table\\n\")\n recordTable.writeTable(args.records)\n sys.exit(0)\n\n### ** Main parseEnsembl\n\ndef main_parseEnsembl(args, stdout, stderr) :\n if args.genes is None and args.records is None :\n msg = \"You should use at least one of -g or -r. Use -h for help.\\n\"\n stdout.write(msg)\n sys.exit(0)\n if args.genes is not None :\n geneTable = pygenes.GeneTable()\n geneTable.parseEnsemblRecord(args.ensemblRecords)\n stderr.write(\"Calculating geneId hash\\n\")\n geneTable.writeTable(args.genes)\n if args.records is not None :\n stderr.write(\"Getting record items\\n\")\n recordTable = pygenes.RecordTable()\n for r in args.ensemblRecords :\n recordTable.addEnsemblRecord(r)\n stderr.write(\"Writing record table\\n\")\n recordTable.writeTable(args.records)\n sys.exit(0)\n\n### ** Main parseEMBL\n\ndef main_parseEMBL(args, stdout, stderr) :\n # if args.genes is None and args.records is None :\n # msg = \"You should use at least one of -g or -r. Use -h for help.\\n\"\n # stdout.write(msg)\n # sys.exit(0)\n if args.genes is not None :\n geneTable = pygenes.GeneTable()\n headers = geneTable.getHeaders()\n with open(args.genes, \"w\") as fo :\n fo.write(\"#\" + \"\\t\".join(headers) + \"\\n\")\n geneTable.streamEMBLRecord(args.emblRecords, outFile = fo,\n headers = headers)\n if args.records is not None :\n msg = \"Parsing records into record table\"\n stderr.write(msg + \"\\n\")\n recordTable = pygenes.RecordTable()\n headers = recordTable.getHeaders()\n with open(args.records, \"w\") as fo :\n fo.write(\"#\" + \"\\t\".join(headers) + \"\\n\")\n for r in args.emblRecords :\n recordTable.streamEMBLRecord(r, fo, headers)\n sys.exit(0)\n\n### ** Main SQL_EMBL\n\ndef main_SQL_EMBL(args, stdout, stderr) :\n # Create the table\n dbConnection = sql.connect(args.outDb)\n cursor = dbConnection.cursor()\n cursor.execute(\"DROP TABLE IF EXISTS Genomes\")\n cursor.execute(\"CREATE TABLE Genomes (\"\n \"filename TEXT UNIQUE, \"\n \"biosample TEXT, \"\n \"organism TEXT, \"\n \"nRecords INTEGER, \"\n \"refs TEXT)\")\n cursor.execute(\"DROP TABLE IF EXISTS Records\")\n cursor.execute(\"CREATE TABLE Records (\"\n \"id TEXT UNIQUE, \"\n \"name TEXT, \"\n \"description TEXT, \"\n \"seq TEXT, \"\n \"seqLen INTEGER, \"\n \"genome_filename TEXT, \"\n \"FOREIGN KEY (genome_filename) REFERENCES Genomes(filename))\")\n cursor.execute(\"DROP TABLE IF EXISTS Peptides\")\n cursor.execute(\"CREATE TABLE Peptides (\"\n \"pepSeq TEXT UNIQUE, \"\n \"pepLen INTEGER, \"\n \"mergedPeptides_id INTEGER)\")\n cursor.execute(\"DROP TABLE IF EXISTS Cds\")\n cursor.execute(\"CREATE TABLE Cds (\"\n \"record_id TEXT, \"\n \"peptide_rowid INTEGER, \"\n \"nucSeq TEXT, \"\n \"location TEXT, \"\n \"translationTable INTEGER, \"\n \"geneName TEXT, \"\n \"productName TEXT, \"\n \"productAccNum TEXT, \"\n \"FOREIGN KEY (record_id) REFERENCES Records(id), \"\n \"FOREIGN KEY (peptide_rowid) REFERENCES Peptides(rowid))\")\n # Go through the EMBL files\n total = str(len(args.emblFiles))\n for (i, f) in enumerate(args.emblFiles) :\n stderr.write(\"Processing file \" + str(i) + \"/\" + total + \" - \" +\n 
os.path.basename(f) + \"\\n\")\n # Genomes table\n d = pygenes.EMBLFileInfo(f)\n if d is not None :\n cursor.execute(\"INSERT INTO Genomes (filename, biosample, \"\n \"organism, nRecords, refs) \"\n \"VALUES (\\\"{filename}\\\", \\\"{biosample}\\\", \"\n \"\\\"{organism}\\\", {nRecords}, \\\"{references}\\\" \"\n \")\".format(filename = d[\"filename\"],\n biosample = d[\"biosample\"],\n organism = d[\"organism\"],\n nRecords = str(d[\"nRecords\"]),\n references = d[\"references\"]))\n dbConnection.commit()\n # Cds and Records tables\n pygenes.parseEMBLtoSQL(f, cursor)\n dbConnection.commit()\n # Create index\n cursor.execute(\"CREATE INDEX idx_pepLen ON Peptides (pepLen)\")\n cursor.execute(\"CREATE INDEX idx_peptide_rowid ON Cds (peptide_rowid)\")\n dbConnection.commit()\n # Close the connection\n dbConnection.close()\n \n### ** Main hash\n\ndef main_hash(args, stdout, stderr) :\n geneTable = pygenes.GeneTable()\n geneTable.loadTable(args.input)\n hashFunctions = { \"md5\" : hashlib.md5,\n \"sha1\" : hashlib.sha1,\n \"sha224\" : hashlib.sha224,\n \"sha256\" : hashlib.sha256,\n \"sha384\" : hashlib.sha384,\n \"sha512\" : hashlib.sha512}\n args.hash = hashFunctions[args.hash]\n geneTable.hashPeptides(args.hash)\n geneTable.writeTable(args.output)\n sys.exit(0)\n \n### ** Main mergePep\n\ndef main_mergePep(args, stdout, stderr) :\n msg = \"Building the hash index\"\n stderr.write(msg + \"\\n\")\n hashIndex = pygenes.buildGeneTableFileIndex(args.input)\n msg = \"Building the length index\"\n stderr.write(msg + \"\\n\")\n lengthIndex = pygenes.buildLengthFileIndex(hashIndex)\n hashToMerged = dict()\n with open(args.fasta, \"w\") as fo :\n for l in lengthIndex.keys() :\n msg = \"Merging sequences of length \" + str(l)\n stderr.write(msg + \"\\n\")\n sequences = pygenes.gatherSequences(args.input, lengthIndex, l)\n mergedSequences = pygenes.mergeSequences(sequences, args.dissim)\n for (k, v) in mergedSequences.items() :\n originalHash = pygenes.md5hash(k)\n mergedHash = pygenes.md5hash(v)\n assert not hashToMerged.get(originalHash, False)\n hashToMerged[originalHash] = mergedHash\n newMerged = set(mergedSequences.values())\n for seq in newMerged :\n fo.write(\">\" + pygenes.md5hash(seq) + \"\\n\")\n fo.write(seq + \"\\n\")\n with open(args.input, \"r\") as fi :\n with open(\"tmp.\" + args.output, \"w\") as fo :\n headers = fi.readline()\n headerElements = headers.lstrip(\"#\").strip().split(\"\\t\")\n fo.write(headers)\n for line in fi :\n content = dict(zip(headerElements, line.strip().split(\"\\t\")))\n content[\"mergedPeptideHash\"] = hashToMerged[content[\"peptideHash\"]]\n fo.write(\"\\t\".join([content[x] for x in headerElements]) + \"\\n\")\n shutil.move(\"tmp.\" + args.output, args.output)\n \n### ** Main extract\n\ndef main_extract(args, stdout, stderr) :\n if args.type == \"gene\" :\n table = pygenes.GeneTable()\n else :\n assert args.type == \"record\"\n table = pygenes.RecordTable()\n table.loadTable(args.inputTable)\n stdout.write(\"#\" + \"\\t\".join(args.columns) + \"\\n\")\n columns = table.col(*args.columns)\n for x in columns :\n stdout.write(\"\\t\".join(x) + \"\\n\")\n\n### ** Main count\n\ndef main_count(args, stdout, stderr) :\n def h(string) :\n h = hashlib.md5()\n h.update(string)\n return h.hexdigest()\n cont = True\n for f in args.inputFiles :\n if cont :\n try :\n a = SeqIO.parse(f, \"fasta\")\n uniqHash = set()\n for s in a :\n uniqHash.add(h(str(s.seq)))\n stdout.write(f + \"\\t\" + str(len(uniqHash)) + \"\\n\")\n except IOError :\n cont = False\n\n", "sub_path": 
"pygenesScripts.py", "file_name": "pygenesScripts.py", "file_ext": "py", "file_size_in_byte": 17220, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 42, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 166, "usage_type": "attribute"}, {"api_name": "sys.stderr", "line_number": 168, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 186, "usage_type": "call"}, {"api_name": "pygenes.GeneTable", "line_number": 188, "usage_type": "call"}, {"api_name": "pygenes.RecordTable", "line_number": 194, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 199, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 207, "usage_type": "call"}, {"api_name": "pygenes.GeneTable", "line_number": 209, "usage_type": "call"}, {"api_name": "pygenes.RecordTable", "line_number": 215, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 220, "usage_type": "call"}, {"api_name": "pygenes.GeneTable", "line_number": 230, "usage_type": "call"}, {"api_name": "pygenes.RecordTable", "line_number": 239, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 245, "usage_type": "call"}, {"api_name": "sqlite3.connect", "line_number": 251, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 290, "usage_type": "call"}, {"api_name": "os.path", "line_number": 290, "usage_type": "attribute"}, {"api_name": "pygenes.EMBLFileInfo", "line_number": 292, "usage_type": "call"}, {"api_name": "pygenes.parseEMBLtoSQL", "line_number": 305, "usage_type": "call"}, {"api_name": "pygenes.GeneTable", "line_number": 317, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 319, "usage_type": "attribute"}, {"api_name": "hashlib.sha1", "line_number": 320, "usage_type": "attribute"}, {"api_name": "hashlib.sha224", "line_number": 321, "usage_type": "attribute"}, {"api_name": "hashlib.sha256", "line_number": 322, "usage_type": "attribute"}, {"api_name": "hashlib.sha384", "line_number": 323, "usage_type": "attribute"}, {"api_name": "hashlib.sha512", "line_number": 324, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 328, "usage_type": "call"}, {"api_name": "pygenes.buildGeneTableFileIndex", "line_number": 335, "usage_type": "call"}, {"api_name": "pygenes.buildLengthFileIndex", "line_number": 338, "usage_type": "call"}, {"api_name": "pygenes.gatherSequences", "line_number": 344, "usage_type": "call"}, {"api_name": "pygenes.mergeSequences", "line_number": 345, "usage_type": "call"}, {"api_name": "pygenes.md5hash", "line_number": 347, "usage_type": "call"}, {"api_name": "pygenes.md5hash", "line_number": 348, "usage_type": "call"}, {"api_name": "pygenes.md5hash", "line_number": 353, "usage_type": "call"}, {"api_name": "shutil.move", "line_number": 364, "usage_type": "call"}, {"api_name": "pygenes.GeneTable", "line_number": 370, "usage_type": "call"}, {"api_name": "pygenes.RecordTable", "line_number": 373, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 384, "usage_type": "call"}, {"api_name": "Bio.SeqIO.parse", "line_number": 391, "usage_type": "call"}, {"api_name": "Bio.SeqIO", "line_number": 391, "usage_type": "name"}]} +{"seq_id": "24251580", "text": "import torch\nimport torch.nn.functional as F\nfrom unet import UNet\nfrom ptsemseg.loader.dataloader import data_loader \nfrom torch.utils import data\nfrom PIL import Image\nimport numpy as np\ndef save_ckp(state):\n f_path = 
\"/media/disk2/sombit/kitti_seg/checkpoint.pt\"\n torch.save(state, f_path)\ndef load_ckp(checkpoint_fpath, model, optimizer):\n checkpoint = torch.load(checkpoint_fpath)\n model.load_state_dict(checkpoint['state_dict'])\n optim.load_state_dict(checkpoint['optimizer'])\n return model, optim, checkpoint['epoch']\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint (device)\nmodel = UNet( padding=True, up_mode='upsample').to(device)\nprint(\"Load Model\")\noptim = torch.optim.Adam(model.parameters())\n# data_loader = get_loader('kitti','seg')\n# data_path = \"/home/sombit/kitti\"\nt_loader = data_loader(\n \n is_transform=True,\n img_norm=False,\n\n)\n\ntrainloader = data.DataLoader(t_loader,\n batch_size=1, \n num_workers=2, \n shuffle=True)\n\nepochs = 100\nresume = False\nif(resume):\n model, optim, start_epoch = load_ckp(\"/media/disk2/sombit/kitti_seg/checkpoint.pt\", model, optim)\n i = start_epoch\nprint(\"Started Training\")\n# import shutil\n\n\nfor i in range(epochs):\n counter =0\n running_loss = 0.0\n for (X, y,image_path) in trainloader:\n\n X = X.to(device) # [N, 1, H, W]\n y = y.to(device) # [N, H, W] with class indices (0, 1)\n model.train()\n optim.zero_grad()\n prediction = model(X) \n n, c, h, w = prediction.size()\n nt, ht, wt = y.size()\n\n # Handle inconsistent size between input and target\n if h > ht and w > wt: # upsample labels\n y = y.unsequeeze(1)\n y = F.upsample(y, size=(h, w), mode=\"nearest\")\n y = prediction.sequeeze(1)\n elif h < ht and w < wt: # upsample images\n prediction = F.upsample(prediction, size=(ht, wt), mode=\"bilinear\")\n elif h != ht and w != wt:\n raise Exception(\"Only support upsampling\")\n\n loss = F.cross_entropy(\n prediction, y, ignore_index=250\n )\n \n # if (i==0):\n # t = y.numpy()\n # print(image_path)\n # # print(t.type)\n # img = np.asarray(y,dtype=np.uint8)\n # # print(t[1,:,:])\n # img = Image.fromarray(np.uint8(img[0,:,:]))\n\n # img.save('test.png')\n # break\n # # print('[%d, %5d] loss: %.3f' %(i + 1, counter + 1, loss))\n \n \n loss.backward()\n optim.step()\n running_loss +=loss.item()\n if counter%10==9:\n print(\"loss\",running_loss/10,\" epochs\",i+1,\"counter\",counter)\n running_loss =0.0\n counter +=1\n # if(i==0):\n # print(image_path)\n # t = y.numpy()\n # print(np.unique(t))\n # break\n\n\n\n \n\nstate_curr = {\n 'state_dict': model.state_dict(),\n 'optimizer': optim.state_dict()\n}\nsave_ckp(state_curr)\n", "sub_path": "train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 3100, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.save", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 12, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 16, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 16, "usage_type": "attribute"}, {"api_name": "unet.UNet", "line_number": 18, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 20, "usage_type": "attribute"}, {"api_name": "ptsemseg.loader.dataloader.data_loader", "line_number": 23, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 30, "usage_type": "call"}, {"api_name": "torch.utils.data", "line_number": 30, "usage_type": "name"}, {"api_name": "torch.nn.functional.upsample", 
"line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 60, "usage_type": "name"}, {"api_name": "torch.nn.functional.upsample", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 63, "usage_type": "name"}, {"api_name": "torch.nn.functional.cross_entropy", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 67, "usage_type": "name"}]} +{"seq_id": "30143557", "text": "import unittest\nfrom mock import patch\nfrom board_test_utils import set_board, get_board_state_tuple\nfrom web_game import WebGame\nfrom board import Board\nfrom game_controller import GameController\nfrom human_player import HumanPlayer\nfrom random_player import RandomPlayer\nfrom td_learning_player import TDLearningPlayer\n\nclass TestWebGame(unittest.TestCase):\n def setUp(self):\n self.game = WebGame()\n\n def set_loaded(self, piece):\n self.loaded[piece] = True\n\n @patch('learning_computer_player.LearningComputerPlayer._load_file')\n def assert_start_game_does(\n self, load_mock, player_types_dict, player1_class, player2_class,\n expected_player1_loaded=False, expected_player2_loaded=False):\n load_mock.side_effect = lambda piece: self.set_loaded(piece)\n self.loaded = {Board.X: False, Board.O: False}\n\n self.assert_game_info_is(None, [], Board.X, \"---|---|---\", self.game.start_game(player_types_dict))\n\n self.assertIsInstance(self.game.player1, player1_class)\n self.assertIsInstance(self.game.player2, player2_class)\n self.assert_is_non_interactive_if_human(player_types_dict[\"x\"], self.game.player1)\n self.assert_is_non_interactive_if_human(player_types_dict[\"o\"], self.game.player2)\n self.assertEqual(expected_player1_loaded, self.loaded[Board.X])\n self.assertEqual(expected_player2_loaded, self.loaded[Board.O])\n self.assertIsInstance(self.game.controller, GameController)\n self.assertEqual(\n {Board.X: player_types_dict[\"x\"], Board.O: player_types_dict[\"o\"]},\n self.game.player_types_dict)\n\n def assert_is_non_interactive_if_human(self, player_type, player):\n if player_type == \"Human\":\n self.assertFalse(player.interactive)\n\n def assert_game_info_is(self, winner, winning_positions, turn, pieces, game_info):\n expected_game_info = \\\n {\n \"winner\": winner,\n \"winning_positions\": winning_positions,\n \"turn\": turn,\n \"board\": list(get_board_state_tuple(pieces))\n }\n self.assertEqual(expected_game_info, game_info)\n\n def test_constructor_initializes_controller(self):\n self.assertIsNone(self.game.controller)\n\n def test_constructor_initializes_players(self):\n self.assertIsNone(self.game.player1)\n self.assertIsNone(self.game.player2)\n\n def test_constructor_initialize_player_types_dict(self):\n self.assertEqual({Board.X: \"\", Board.O: \"\"}, self.game.player_types_dict)\n\n def test_start_game_sets_non_interactive_humans(self):\n self.assert_start_game_does(\n player_types_dict={\"x\": \"Human\", \"o\": \"Human\"},\n player1_class=HumanPlayer,\n player2_class=HumanPlayer)\n\n def test_start_game_loads_learning_players(self):\n self.assert_start_game_does(\n player_types_dict={\"x\": \"TD\", \"o\": \"TD\"},\n player1_class=TDLearningPlayer,\n player2_class=TDLearningPlayer,\n expected_player1_loaded=True,\n expected_player2_loaded=True)\n\n def test_start_game_loads_learning_players_once_if_same_player_type(self):\n self.assert_start_game_does(\n player_types_dict={\"x\": \"TD\", \"o\": \"Random\"},\n player1_class=TDLearningPlayer,\n player2_class=RandomPlayer,\n 
expected_player1_loaded=True,\n expected_player2_loaded=False)\n self.assert_start_game_does(\n player_types_dict={\"x\": \"TD\", \"o\": \"Random\"},\n player1_class=TDLearningPlayer,\n player2_class=RandomPlayer,\n expected_player1_loaded=False,\n expected_player2_loaded=False)\n\n def test_start_game_loads_learning_players_when_diff_player_type(self):\n self.assert_start_game_does(\n player_types_dict={\"x\": \"TD\", \"o\": \"Random\"},\n player1_class=TDLearningPlayer,\n player2_class=RandomPlayer,\n expected_player1_loaded=True,\n expected_player2_loaded=False)\n self.assert_start_game_does(\n player_types_dict={\"x\": \"Random\", \"o\": \"TD\"},\n player1_class=RandomPlayer,\n player2_class=TDLearningPlayer,\n expected_player1_loaded=False,\n expected_player2_loaded=True)\n\n @patch('random_player.RandomPlayer.get_move')\n def test_make_computer_move(self, get_move_mock):\n get_move_mock.return_value = 4\n self.game.start_game({\"x\": \"Random\", \"o\": \"Human\"})\n set_board(self.game.controller.board, \"XXO|O-X|OOX\")\n self.assert_game_info_is(Board.X, [0, 4, 8], Board.O, \"XXO|OXX|OOX\", self.game.make_computer_move())\n\n @patch('random_player.RandomPlayer.get_move')\n def test_make_human_move(self, get_move_mock):\n get_move_mock.return_value = 6\n self.game.start_game({\"x\": \"Random\", \"o\": \"Human\"})\n self.game.make_computer_move()\n set_board(self.game.controller.board, \"OXX|O-X|---\")\n self.assert_game_info_is(\n Board.O, [0, 3, 6], Board.X, \"OXX|O-X|O--\", self.game.make_human_move(\" 7 \"))\n", "sub_path": "test/test_web_game.py", "file_name": "test_web_game.py", "file_ext": "py", "file_size_in_byte": 5114, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "unittest.TestCase", "line_number": 11, "usage_type": "attribute"}, {"api_name": "web_game.WebGame", "line_number": 13, "usage_type": "call"}, {"api_name": "board.Board.X", "line_number": 23, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 23, "usage_type": "name"}, {"api_name": "board.Board.O", "line_number": 23, "usage_type": "attribute"}, {"api_name": "board.Board.X", "line_number": 25, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 25, "usage_type": "name"}, {"api_name": "board.Board.X", "line_number": 31, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 31, "usage_type": "name"}, {"api_name": "board.Board.O", "line_number": 32, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 32, "usage_type": "name"}, {"api_name": "game_controller.GameController", "line_number": 33, "usage_type": "argument"}, {"api_name": "board.Board.X", "line_number": 35, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 35, "usage_type": "name"}, {"api_name": "board.Board.O", "line_number": 35, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 18, "usage_type": "call"}, {"api_name": "board_test_utils.get_board_state_tuple", "line_number": 48, "usage_type": "call"}, {"api_name": "board.Board.X", "line_number": 60, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 60, "usage_type": "name"}, {"api_name": "board.Board.O", "line_number": 60, "usage_type": "attribute"}, {"api_name": "human_player.HumanPlayer", "line_number": 65, "usage_type": "name"}, {"api_name": "human_player.HumanPlayer", "line_number": 66, "usage_type": "name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 71, "usage_type": 
"name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 72, "usage_type": "name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 79, "usage_type": "name"}, {"api_name": "random_player.RandomPlayer", "line_number": 80, "usage_type": "name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 85, "usage_type": "name"}, {"api_name": "random_player.RandomPlayer", "line_number": 86, "usage_type": "name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 93, "usage_type": "name"}, {"api_name": "random_player.RandomPlayer", "line_number": 94, "usage_type": "name"}, {"api_name": "random_player.RandomPlayer", "line_number": 99, "usage_type": "name"}, {"api_name": "td_learning_player.TDLearningPlayer", "line_number": 100, "usage_type": "name"}, {"api_name": "board_test_utils.set_board", "line_number": 108, "usage_type": "call"}, {"api_name": "board.Board.X", "line_number": 109, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 109, "usage_type": "name"}, {"api_name": "board.Board.O", "line_number": 109, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 104, "usage_type": "call"}, {"api_name": "board_test_utils.set_board", "line_number": 116, "usage_type": "call"}, {"api_name": "board.Board.O", "line_number": 118, "usage_type": "attribute"}, {"api_name": "board.Board", "line_number": 118, "usage_type": "name"}, {"api_name": "board.Board.X", "line_number": 118, "usage_type": "attribute"}, {"api_name": "mock.patch", "line_number": 111, "usage_type": "call"}]} +{"seq_id": "320124542", "text": "#!/bin/python3\r\n# version 0.0.1\r\n# Author: Tran Minh Tri\r\n# Organization: CSI - CTRAD\r\n\r\n\"\"\"\r\nUsage:\r\npython3 var_report.py (in the directory of interest)\r\n\r\nInput file(s):\r\n GC*_asn_maf.txt\r\nOutput file(s):\r\n AllAnnotatedVariants.txt\r\nTools:\r\n Process line-by-line, file-by-file with Python\r\n\"\"\"\r\n\r\nimport glob\r\nfrom collections import OrderedDict\r\n\r\ndef parseReads(frmt, reads):\r\n frmt = frmt.split(':')\r\n reads = reads.split(':')\r\n result = ''\r\n for (i, element) in enumerate(frmt):\r\n if element == 'AD':\r\n result = reads[i]\r\n break\r\n result = result.split(',')\r\n return result[0] + '|' + result[1]\r\n\r\nprint('Listing files to be processed:')\r\ncases = {}\r\nfor file in glob.glob('GC*_allVar.txt'):\r\n case = file.split('_')[0]\r\n cases[case] = file\r\n print('\\t', file)\r\n\r\ncases = OrderedDict(sorted(cases.items(), key=lambda t: t[0]))\r\n\r\nvariants = {}\r\nvar_count = 0\r\nlast_case = ''\r\nexclude = [2, 5, 6, 7, 8, 9]\r\nexclude.reverse()\r\n\r\nfor case in cases:\r\n summary = {}\r\n last_case = case\r\n # Read file\r\n file = open(cases[case], 'r')\r\n print('Reading input file:', file.name)\r\n\r\n for line in file:\r\n if \"#\" in line:\r\n continue\r\n var = line.split('\\t')\r\n key = var[0] + '_' + var[1] + '_' + var[3] + '_' + var[4]\r\n if not key in variants:\r\n var_count += 1\r\n var_cases = {}\r\n var_cases[case] = 1\r\n var_sum = 1\r\n var_res = 1 if case in ['GC117', 'GC119'] else 0\r\n var_nonRes = 1 if case in ['GC127', 'GC84'] else 0\r\n var_quals = {}\r\n var_quals[case] = var[5]\r\n var_reads = {}\r\n var_reads[case] = parseReads(var[8], var[9])\r\n for item in exclude:\r\n var.pop(item)\r\n variants[key] = [var_cases, var_sum, var_res, var_nonRes, var_quals, var_reads, \"\\t\".join(var)]\r\n else:\r\n variants[key][0][case] = 1 # cases\r\n variants[key][1] += 1 # sum\r\n variants[key][2] += case in ['GC117', 
'GC119'] and 1 or 0\r\n variants[key][3] += case in ['GC127', 'GC84'] and 1 or 0\r\n variants[key][4][case] = var[5] # qual\r\n variants[key][5][case] = parseReads(var[8], var[9]) # reads\r\n file.close()\r\n\r\nprint('Generating header')\r\nheader = []\r\nheader.append('ID')\r\nheader.append('Variant')\r\nfor case in cases: \r\n header.append(case)\r\nheader.append('Sum')\r\nheader.append('Responders sum (117, 119)')\r\nheader.append('Non-responders sum (127, 84)')\r\nfor case in cases: \r\n header.append(case + '_Quality')\r\nfor case in cases: \r\n header.append(case + '_Reads')\r\nifile = open(cases[last_case], 'r')\r\nfor line in ifile:\r\n if '#CHR' in line:\r\n in_header = line.split('\\t')\r\n for item in exclude:\r\n in_header.pop(item)\r\n header += in_header\r\n break\r\nifile.close()\r\n\r\nprint('Writing output file...')\r\nofile = open('AllAnnotatedVariants.txt', 'w')\r\nofile.write('\\t'.join(header))\r\nctr = 0\r\nfor key in variants:\r\n ctr += 1\r\n if ctr % 10000 == 0:\r\n print('\\t' + str(ctr) + ' variants writen')\r\n [var_cases, var_sum, res, nonRes, quals, reads, details] = variants[key]\r\n line = 'var' + str(ctr) + '\\t' + key + '\\t'\r\n for case in cases:\r\n if case in var_cases:\r\n line += '1\\t'\r\n else:\r\n line += '0\\t'\r\n line += str(var_sum) + '\\t' + str(res) + '\\t' + str(nonRes) + '\\t'\r\n for case in cases:\r\n if case in quals:\r\n line += quals[case] + '\\t'\r\n else:\r\n line += '\\t'\r\n for case in cases:\r\n if case in reads:\r\n line += reads[case] + '\\t'\r\n else:\r\n line += '\\t'\r\n line += details\r\n ofile.write(line)\r\nprint('\\t' + str(ctr) + ' variants writen')\r\nprint('Finish writing ' + ofile.name)\r\nofile.close()\r\n", "sub_path": "projects/khk_snp/allVariants_khk_report.py", "file_name": "allVariants_khk_report.py", "file_ext": "py", "file_size_in_byte": 3900, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "collections.OrderedDict", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "395030817", "text": "from django import template\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef stars(skill, level):\n '''\n Creates a skill labeled with a series of 5 stars filled in\n according to level.\n '''\n if level < 0 or level > 5:\n raise ValueError('Skill must be a number between 0 and 5')\n i = 1\n filled_star = ''\n empty_star = ''\n skill_str = '{0}'.format(skill)\n skill_str += ''\n while i <= level:\n skill_str += filled_star\n i += 1\n while i <= 5:\n skill_str += empty_star\n i += 1\n skill_str += ''\n\n return skill_str\n", "sub_path": "personalsite/templatetags/personalsite_extras.py", "file_name": "personalsite_extras.py", "file_ext": "py", "file_size_in_byte": 755, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.template.Library", "line_number": 3, "usage_type": "call"}, {"api_name": "django.template", "line_number": 3, "usage_type": "name"}]} +{"seq_id": "653012354", "text": "# Kasim Terzic (kt54) Feb 2018\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Our linear model defined as a list of theta parameters\n# Accepts the samples and thetas and returns the vector of\n# predictions\ndef f(x, theta):\n y_hat = []\n\n # Calculate our linear function\n # remember, X_0 is 1, so theta_0 is intercept\n for sample in x:\n y_acc = 0\n 
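# compute the dot product of this sample with theta, one feature at a time\n        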
for i in range(len(sample)):\n y_acc = y_acc + theta[i]*sample[i]\n y_hat.append(y_acc)\n return np.array(y_hat)\n\n# Our squared error loss and MSE loss\ndef error(y, y_hat):\n return (y-y_hat)\n\ndef squaredError(y, y_hat):\n return (y-y_hat)**2\n\ndef meanSquaredErrorLoss(y, y_hat):\n return squaredError(y,y_hat).mean()\n\ndef gradient(x,y,theta):\n err = error(y, f(x,theta))\n grad = -(1.0/len(x)) * err.dot(x)\n return grad\n\n# Gradient descent to find the best parameters\ndef gradientDescent(x,y,alpha,theta,stop=.1):\n grad = gradient(x,y,theta)\n MSE_Theta_list = []\n\n while np.linalg.norm(grad) > stop:\n # Move in the direction of the gradient\n # N.B. this is point-wise multiplication, not a dot product\n theta = theta - grad*alpha\n mse = meanSquaredErrorLoss(y,f(x,theta))\n grad = gradient(x,y,theta)\n MSE_Theta_list.append((mse, theta))\n print(mse)\n\n print(\"Gradient descent finished. MSE=\"+str(mse))\n return theta, mse, MSE_Theta_list\n\n# Finally, a helper to display the model against the data\ndef plotModel(x, y, y_hat, title='Plot'):\n # Create a dictionary to pass to matplotlib\n # These settings make the plots readable on slides, feel free to change\n # This is an easy way to set many parameters at once\n fontsize = \"30\";\n params = {'figure.autolayout':True,\n 'legend.fontsize': fontsize,\n 'figure.figsize': (12, 8),\n 'axes.labelsize': fontsize,\n 'axes.titlesize': fontsize,\n 'xtick.labelsize':fontsize,\n 'ytick.labelsize':fontsize}\n plt.rcParams.update(params)\n \n # Create a new figure and an axes objects for the subplot\n # We only have one plot here, but it's helpful to be consistent\n fig, ax = plt.subplots()\n \n # Draw a scatter plot of the first column of x vs second column.\n ax.scatter(x[:,1], y,color='blue', alpha=.8, s=140, marker='v')\n ax.set_xlabel('Height')\n ax.set_ylabel('Weight')\n ax.grid(color='lightgray', linestyle='-', linewidth=1)\n ax.set_axisbelow(True)\n ax.set_title(title)\n\n x2 = [x[:,1].min(), x[:,1].max()]\n y2 = [y_hat.min(), y_hat.max()]\n ax.plot(x2,y2,color='red', linewidth='3')\n\n# And a function to plot the loss function\ndef plotLossFunction(x, y, MSE_Theta, title='Plot'):\n # Create a dictionary to pass to matplotlib\n # These settings make the plots readable on slides, feel free to change\n # This is an easy way to set many parameters at once\n fontsize = \"20\";\n params = {'figure.autolayout':True,\n 'legend.fontsize': fontsize,\n 'figure.figsize': (12, 8),\n 'axes.labelsize': fontsize,\n 'axes.titlesize': fontsize,\n 'xtick.labelsize':fontsize,\n 'ytick.labelsize':fontsize}\n plt.rcParams.update(params)\n \n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n\n res = 30\n xspace = np.linspace(-1, 2, res)\n yspace = np.linspace(-1, 2, res)\n xx, yy = np.meshgrid(xspace, yspace)\n xy = np.c_[xx.ravel(), yy.ravel()]\n L = []\n for theta in xy:\n L.append(meanSquaredErrorLoss(y,f(x,theta)))\n L = np.array(L).reshape(res,res)\n\n ax.plot_surface(xx, yy, L, rstride=1, cstride=1, cmap='jet', edgecolor='none', alpha=0.7) \n ax.set_xlabel('Theta 0')\n ax.set_ylabel('Theta 1')\n ax.set_title(title)\n thetaWithContour(MSE_Theta, ax)\n\ndef errorOverTime(epochs, MSEs):\n x = epochs\n y = MSEs\n\n fig, ax = plt.subplots()\n \n ax.scatter(x, y,color='blue')\n ax.set_xlabel('Epochs')\n ax.set_ylabel('MSE')\n ax.set_xlim(-100, 1000)\n ax.grid(color='lightgray', linestyle='-', linewidth=1)\n ax.set_axisbelow(True)\n ax.set_title(\"MSE vs. 
Epochs\")\n\ndef thetaWithContour(thetas, ax):\n theta_zero = [x[1][0] for x in thetas]\n theta_one = [x[1][1] for x in thetas]\n ax.scatter(theta_zero, theta_one, color='black')\n", "sub_path": "l04_utils.py", "file_name": "l04_utils.py", "file_ext": "py", "file_size_in_byte": 4373, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.array", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 42, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 67, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.rcParams.update", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.rcParams", "line_number": 98, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 104, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 105, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.c_", "line_number": 107, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "195640953", "text": "import logging\nimport os\nfrom string import Template\n\nimport numpy as np\nfrom dataforge import Meta\nfrom phd.satellite.covert_to_hdf5 import convert_satellite_proto\nfrom phd.satellite.run import input_generator_satellite\nfrom phd.utils.hdf5_tools import get_convertor, ProtoReader\nfrom phd.utils.run_tools import multirun_command, \\\n dir_name_generator, values_from_dict, InputData\n\nROOT_PATH = os.path.dirname(__file__)\n\nINPUT_TEMPLATE = \"\"\"/npm/geometry/type gdml\n/npm/geometry/gdml ../../build/satellite/gdml/satellite.gdml\n/npm/visualization false\n/npm/satellite/detector ${mode}\n/npm/satellite/output file\n\n/gps/particle ${particle}\n/gps/number 1\n/gps/direction ${dirX} 0.0 ${dirZ}\n/gps/ene/mono ${energy} MeV\n/gps/position ${posX} 0. 
${posZ} m\n/run/beamOn ${number}\nexit\n\"\"\"\n\n\n\n\n\ndef main():\n    logging.basicConfig(filename=\"run.log\")\n    logging.root.setLevel(logging.DEBUG)\n    OUTPUT_FILE = \"result.hdf5\"\n    values_macros = {\n        \"mode\" : \"single\", # or \"sum\": save either every single simulation or only the mean value\n        \"radius\" : 0.15,\n        # \"shift\": [0.0, 0.005, 0.016],\n        # \"theta\": [0.0, 10.0, 20., 30.0, 50.0, 70.0],\n        \"shift\" : 0.0, # shift from the central axis; may be a list\n        \"theta\" : 0.0, # deviation from the central axis; may be a list\n        \"theta_unit\": \"degree\",\n        'energy': np.arange(10.0,15.1, 1.0), # set of energies to simulate\n        'number': [100], # number of events per simulation\n        # 'particle': 'e-'\n        'particle': 'proton' # particle to fire\n    }\n    meta = Meta(\n        {\n            \"macros\": values_macros,\n        }\n    )\n\n    input_data = input_generator_satellite(meta, INPUT_TEMPLATE, init_pos=[0.0,0.0, 0.1])\n    command = \"../../build/satellite/geant4-satellite.exe\"\n    readers = [ProtoReader(\"deposit.proto.bin\", proto_convertor=convert_satellite_proto)]\n    multirun_command(input_data, command, post_processor=get_convertor(readers, OUTPUT_FILE, clear=True))\n    return 0\n\n\nif __name__ == '__main__':\n    main()\n", "sub_path": "python/simulation_scripts/satellite/satellite_0.py", "file_name": "satellite_0.py", "file_ext": "py", "file_size_in_byte": 2238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "logging.basicConfig", "line_number": 35, "usage_type": "call"}, {"api_name": "logging.root.setLevel", "line_number": 36, "usage_type": "call"}, {"api_name": "logging.root", "line_number": 36, "usage_type": "attribute"}, {"api_name": "logging.DEBUG", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 46, "usage_type": "call"}, {"api_name": "dataforge.Meta", "line_number": 51, "usage_type": "call"}, {"api_name": "phd.satellite.run.input_generator_satellite", "line_number": 57, "usage_type": "call"}, {"api_name": "phd.utils.hdf5_tools.ProtoReader", "line_number": 59, "usage_type": "call"}, {"api_name": "phd.satellite.covert_to_hdf5.convert_satellite_proto", "line_number": 59, "usage_type": "name"}, {"api_name": "phd.utils.run_tools.multirun_command", "line_number": 60, "usage_type": "call"}, {"api_name": "phd.utils.hdf5_tools.get_convertor", "line_number": 60, "usage_type": "call"}]} +{"seq_id": "591874097", "text": "import pandas as pd\nimport tensorflow as tf\nimport transformers as ppb\nfrom sklearn.model_selection import train_test_split\n\ndf = pd.read_csv('books_v1_02_cleaned.tsv.gz', compression='gzip', sep='\\t')\n#df = pd.read_csv('out.csv')\n\ndf = df.astype({'star_rating': float})\ndf['star_rating'] = (df['star_rating'] - 1) / 4.\n\ntrain_texts, val_texts, train_labels, val_labels = train_test_split(df.review_body.values, df.star_rating.values,\n                                                                    test_size=.2)\ntest_texts, val_texts, test_labels, val_labels = train_test_split(val_texts, val_labels, test_size=.5)\n\nmodel_class, tokenizer_class, pretrained_weights = (\n    ppb.TFBertForSequenceClassification, ppb.BertTokenizerFast,\n    'bert-base-uncased')  # for trainer API\n\nconfig = ppb.BertConfig.from_pretrained(pretrained_weights, num_labels=1, problem_type=\"regression\")\ntokenizer = tokenizer_class.from_pretrained(pretrained_weights, config=config)\n\ntrain_encodings = 
tokenizer(train_texts.tolist(), padding=True, truncation=True)\nval_encodings = tokenizer(val_texts.tolist(), padding=True, truncation=True)\ntest_encodings = tokenizer(test_texts.tolist(), padding=True, truncation=True)\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((dict(train_encodings), train_labels))\nval_dataset = tf.data.Dataset.from_tensor_slices((dict(val_encodings), val_labels))\ntest_dataset = tf.data.Dataset.from_tensor_slices((dict(test_encodings), test_labels))\nfor i in range(8):\n    shard = train_dataset.shard(8, i)\n    tf.data.experimental.save(shard, f\"./dataset_shards/train_dataset_{i}\", compression=\"GZIP\")\n\n    shard = val_dataset.shard(8, i)\n    tf.data.experimental.save(shard, f\"./dataset_shards/val_dataset_{i}\", compression=\"GZIP\")\n\n    shard = test_dataset.shard(8, i)\n    tf.data.experimental.save(shard, f\"./dataset_shards/test_dataset_{i}\", compression=\"GZIP\")\n", "sub_path": "make_datasets.py", "file_name": "make_datasets.py", "file_ext": "py", "file_size_in_byte": 1880, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pandas.read_csv", "line_number": 6, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 12, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 14, "usage_type": "call"}, {"api_name": "transformers.TFBertForSequenceClassification", "line_number": 17, "usage_type": "attribute"}, {"api_name": "transformers.BertTokenizerFast", "line_number": 17, "usage_type": "attribute"}, {"api_name": "transformers.BertConfig.from_pretrained", "line_number": 20, "usage_type": "call"}, {"api_name": "transformers.BertConfig", "line_number": 20, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 27, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 28, "usage_type": "attribute"}, {"api_name": "tensorflow.data.Dataset.from_tensor_slices", "line_number": 29, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 29, "usage_type": "attribute"}, {"api_name": "tensorflow.data.experimental.save", "line_number": 32, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 32, "usage_type": "attribute"}, {"api_name": "tensorflow.data.experimental.save", "line_number": 35, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 35, "usage_type": "attribute"}, {"api_name": "tensorflow.data.experimental.save", "line_number": 38, "usage_type": "call"}, {"api_name": "tensorflow.data", "line_number": 38, "usage_type": "attribute"}]} +{"seq_id": "39950576", "text": "# coding: utf-8\n\nfrom leancloud import Engine\nfrom leancloud import LeanEngineError\nimport time\nfrom app import app\nimport sqlite3API\nimport auto_trader\nimport easyquotation\nfrom send_mail import send_mail\nimport leanDBAccess \nimport os\nimport requests\n\nimport pandas as pd\nfrom pandas.compat import StringIO\ntry:\n    from urllib.request import urlopen, Request\nexcept ImportError:\n    from urllib2 import urlopen, Request\n\nengine = Engine(app)\n\n@engine.define\ndef huanxing():\n    print('huanxing',time.time())\n    \n@engine.define\ndef getHangqingFromQQ():\n    q = easyquotation.use('qq')\n\n    # top 40 smallest free-float market caps among stocks listed within 300 days\n    dic,stock_list_300 = gettimeToMarket()\n    
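# dic maps each candidate code to its listing date; stock_list_300 is the candidate list\n    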
# get current positions\n    dic_position = auto_trader.getPosition()\n    # build the list of stocks to quote\n    stock_list = list(set(stock_list_300)|set(dic_position.keys()))\n\n    # fetch the latest quotes from qq\n    stockinfo,stockinfo_zhangting = q.stocks(stock_list)\n\n    # sort by free-float market cap\n    temp = sorted(stockinfo.items(), key=lambda d:d[1]['流通市值'])\n    # smallest free-float cap among stocks we do not hold\n    min_liutong_trade = None\n    #print(temp)\n    for key,value in temp:\n        if checkExistsCode(key)==False:\n            min_liutong_trade = value\n            print('min_liutong_trade',min_liutong_trade['code'],min_liutong_trade['name'],min_liutong_trade['流通市值'])\n            break\n    \n    # largest held position\n    max_chicang_liutong_trade = getMaxChicangLiutong(stockinfo,dic_position.keys())\n\n    # when we hold a stock, decide whether a rebalance is needed\n    if max_chicang_liutong_trade and min_liutong_trade:\n        print ('max_chicang_trade',max_chicang_liutong_trade['code'],max_chicang_liutong_trade['name'],max_chicang_liutong_trade['流通市值'])\n        \n        max_liutong_sunhao = max_chicang_liutong_trade['流通市值']*max_chicang_liutong_trade['bid1']/max_chicang_liutong_trade['now']\n        min_liutong_sunhao = min_liutong_trade['流通市值']*min_liutong_trade['ask1']/min_liutong_trade['now']\n        max_min_cha_suohao = round((max_liutong_sunhao/min_liutong_sunhao - 1)*100,3)\n        #max_min_cha = str(round((max_chicang_liutong_trade['流通市值']/min_liutong_trade['流通市值'] - 1)*100,2)) + '%'\n        # print(max_min_cha_suohao)\n        #auto_trader.autoTrader(max_chicang_liutong_trade,min_liutong_trade,max_min_cha_suohao)\n        checkTrader(max_chicang_liutong_trade,min_liutong_trade,max_min_cha_suohao)\n\ndef checkTrader(position_info,min_liutong,cha):\n#    code_position = position_info['code']\n    # trade only when the cap gap (4%) is met\n    print ('cha %s' % cha)\n    if cha <= auto_trader.getShizhiCha(min_liutong['datetime'][1:6]) :\n        return\n    \n    if auto_trader.checkFadingJiari()==False:\n        print('isOpen = False')\n        return\n    \n    if auto_trader.checkTradeTime(min_liutong['datetime'][1:6]) == False:\n        print('not trade time %s' % min_liutong['datetime'])\n        return\n    \n    strSell = '卖出:%s(%s) 流通市值(%s)' % (position_info['name'],position_info['code'],position_info['流通市值'])\n    strBuy = '买入:%s(%s) 流通市值(%s)' % (min_liutong['name'],min_liutong['code'],min_liutong['流通市值'])\n    send_mail('【满足调仓】市值差:%s' % cha,'%s \\r\\n %s' % (strSell,strBuy))\n    \n'''\n    # smallest free-float market cap\n    min_liutong = min(stockinfo.items(), key=lambda d:d[1]['流通市值'])[1]\n    \n    #get Position\n    dic_position = auto_trader.getPosition()\n    \n#    if key in dic_position.keys():\n    \n    # compute free-float cap gaps\n    for key,value in stockinfo.items():\n        try:\n            if key in dic_position.keys():\n                # cap gap: (free-float cap / min free-float cap) - 1\n                stockinfo[key]['cha'] = str(round((float(stockinfo[key]['流通市值'])/float(min_liutong['流通市值']) - 1)*100,2)) + '%'\n                # friction-adjusted gap: (cap*bid1/now)/(min cap*ask1/now) - 1\n                liutong_sunhao = stockinfo[key]['流通市值']*stockinfo[key]['bid1']/stockinfo[key]['now']\n                min_liutong_sunhao = min_liutong['流通市值']*min_liutong['ask1']/min_liutong['now']\n                stockinfo[key]['cha_sunhao'] = str(round((liutong_sunhao/min_liutong_sunhao - 1)*100,2)) + '%'\n\n                # when the stock is held, decide whether a rebalance is needed\n                # if key in dic_position.keys():\n                auto_trader.autoTrader(stockinfo[key],min_liutong,round((liutong_sunhao/min_liutong_sunhao - 1)*100,3))\n        except Exception as e:\n            print (e)\n''' \n# read listing dates from the local sqlite db\ndef gettimeToMarket():\n\n    conn = sqlite3API.get_conn('stock.db')\n    sql_tid='''\n    select stock_info.code,stock_info.timeToMarket from liutong_from_qq \n    inner join stock_info on\n    liutong_from_qq.code = stock_info.code\n    where liutong_from_qq.liutong<13 and substr(liutong_from_qq.code,1,1) != '3' \n    and substr(stock_info.timeToMarket,1,4) || '-' || substr(stock_info.timeToMarket,5,2) || '-' || substr(stock_info.timeToMarket,7,2) > date('now','-270 
days')\n order by liutong_from_qq.liutong \n limit 40;\n '''\n #union all XXX 持仓股\n \n info_tid=sqlite3API.fetchmany(conn,sql_tid)\n dic = dict()\n stock_list=[]\n for info_temp in info_tid:\n dic[info_temp[0]] = str(info_temp[1])\n stock_list.append(info_temp[0])\n \n return dic,stock_list\n\n''' 删除跟券商关联部分 017/07/06 \n@engine.define\ndef getPosition():\n try:\n user = auto_trader.getUser()\n #position\n data = auto_trader.insertPosition(user.position)\n send_mail('Position ',str(data))\n print(str(data))\n except Exception as e :\n print(str(e))\n send_mail('[error] Position ',str(e))\n\n@engine.define\ndef buyIPO():\n try:\n user = auto_trader.getUser()\n #getIpo\n df_today_ipo,df_ipo_limit = user.get_ipo_info()\n result_mail = ''\n for i in range(len(df_today_ipo)):\n code = df_today_ipo.ix[i]['代码']\n price = df_today_ipo.ix[i]['价格']\n amount = df_today_ipo.ix[i]['账户额度']\n result = user.buy(code,price,amount=amount)\n result_mail += '\\r\\n[%s]buy IPO:%s,%s,%s,%s' % (str(i+1),code,price,amount,str(result))\n time.sleep(2)\n \n if result_mail:\n send_mail('buyIPO',result_mail)\n print(result_mail)\n else:\n #none ipo\n print('today none IPO!')\n \n #资金状况\n leanDBAccess.saveBalanceLeanCloud(user.balance)\n time.sleep(2)\n #持仓\n auto_trader.insertPosition(user.position)\n# time.sleep(2)\n except Exception as e :\n print(str(e))\n send_mail('[error] buyIPO ',str(e))\n'''\n\n@engine.define\ndef getAllStockInfo():\n df = get_stock_basics()\n conn = sqlite3API.get_conn('stock.db')\n df.to_sql('stock_info',con=conn,flavor='sqlite', if_exists='replace')\n \n #取得流通市值\n getLiutong_from_qq()\n \n#get_stock_basics\ndef get_stock_basics():\n \"\"\"\n 获取沪深上市公司基本情况\n Return\n --------\n DataFrame\n code,代码\n name,名称\n industry,细分行业\n area,地区\n pe,市盈率\n outstanding,流通股本\n totals,总股本(万)\n totalAssets,总资产(万)\n liquidAssets,流动资产\n fixedAssets,固定资产\n reserved,公积金\n reservedPerShare,每股公积金\n eps,每股收益\n bvps,每股净资\n pb,市净率\n timeToMarket,上市日期\n \"\"\"\n# ALL_STOCK_BASICS_FILE = 'http://218.244.146.57/static/all.csv'\n ALL_STOCK_BASICS_FILE = 'http://file.tushare.org/tsdata/all.csv'\n request = Request(ALL_STOCK_BASICS_FILE)\n text = urlopen(request, timeout=10).read()\n text = text.decode('GBK')\n text = text.replace('--', '')\n df = pd.read_csv(StringIO(text), dtype={'code':'object'})\n df = df.set_index('code')\n return df\n\ndef getCixinCode():\n conn = sqlite3API.get_conn('stock.db')\n\n sql_tid='''\n select code from stock_info \n where substr(stock_info.timeToMarket,1,4) || '-' || substr(stock_info.timeToMarket,5,2) || '-' || substr(stock_info.timeToMarket,7,2) > date('now','-300 days') \n and substr(code,1,1) != '3' ;\n '''\n info_tid=sqlite3API.fetchmany(conn,sql_tid)\n stock_list=[]\n for info_temp in info_tid:\n stock_list.append(info_temp[0])\n \n return stock_list \n\ndef getLiutong_from_qq():\n q = easyquotation.use('qq')\n\n #取上市300天内的股票\n stock_list = getCixinCode()\n stockinfo,stockinfo_zhangting = q.stocks(stock_list)\n data = []\n \n for key,value in stockinfo.items():\n try:\n infoLiutong = (stockinfo[key]['code'],stockinfo[key]['流通市值'])\n data.append(infoLiutong)\n\n except Exception as e:\n print(e)\n \n for key,value in stockinfo_zhangting.items():\n try:\n infoLiutong = (stockinfo_zhangting[key]['code'],stockinfo_zhangting[key]['流通市值'])\n data.append(infoLiutong)\n\n except Exception as e:\n print(e)\n #sql_truncat = 'truncat table liutong_from_qq'\n sql = 'insert into liutong_from_qq values(?,?)'\n conn = sqlite3API.get_conn('stock.db')\n #sqlite3API.save(conn,sql_truncat,data)\n 
sqlite3API.truncate(conn,'liutong_from_qq')\n\n sqlite3API.save(conn,sql,data)\n print('getLiutong_from_qq OK!')\n\n\n#判断股票是否持仓\ndef checkExistsCode(code):\n conn = sqlite3API.get_conn('stock.db')\n\n sql_tid='''\n select code from chicang \n where code = '%s' ;\n '''\n info_tid=sqlite3API.fetchmany(conn,sql_tid % code)\n if info_tid and len(info_tid)>0:\n return True\n else:\n return False\n \n#取得可用股份数\ndef getKeyongGufen(code):\n conn = sqlite3API.get_conn('stock.db')\n\n sql_tid='''\n select gufen_keyong from chicang \n where code = '%s' ;\n '''\n info_tid=sqlite3API.fetchmany(conn,sql_tid % code)\n if info_tid and len(info_tid)>0:\n return info_tid[0][0]\n else:\n return 0\n\n#取得持仓股中,流通市值最大的且可交易的股票\ndef getMaxChicangLiutong(stockinfo,listCode):\n dicMaxLiutong = dict()\n liutong = 0.0\n for code in listCode:\n if getKeyongGufen(code)>0 and stockinfo[code]['流通市值']>liutong:\n liutong = stockinfo[code]['流通市值']\n dicMaxLiutong = stockinfo[code]\n return dicMaxLiutong\n\n#每日记录下持仓及盈亏\n@engine.define\ndef getPositionTongji():\n \n dic_position = leanDBAccess.getNewPositionLeanCloud()\n dic_position_history = leanDBAccess.getNewPositionHistoryLeanCloud()\n \n q = easyquotation.use('qq')\n stockinfo,stockinfo_zhangting = q.fetch_stocks(list(dic_position.keys()))\n #合并\n dictMerged=stockinfo.copy()\n dictMerged.update(stockinfo_zhangting)\n \n #按涨跌幅排序\n temp = sorted(dictMerged.items(), key=lambda d:d[1]['涨跌幅'])\n \n #总持仓市值\n allPosition = 0.0\n #总盈亏\n allYingkui = 0.0\n #今日盈亏\n todayYingkui = 0.0\n #港股汇率\n HKhuilv = float(os.environ['HKhuilv'])\n #分类合计\n# dic_shichang_fenlei = dict()\n #行业分类合计\n dic_hangye_fenlei = dict()\n #position\n position_dic = dict()\n #历史持仓统计\n for key in dic_position_history.keys():\n start_price = dic_position_history[key]['start_price']\n end_price = dic_position_history[key]['end_price']\n gushu = dic_position_history[key]['num']\n dic_position_history[key]['盈亏']=round((end_price-start_price)*gushu,1)\n dic_position_history[key]['盈亏(%)']=str(round((end_price/start_price-1)*100,2)) + '%'\n dic_position_history[key]['市值']=round(end_price*gushu,0)\n \n #B股,港币的时候\n if dic_position_history[key]['bizhong']=='HK' :\n allYingkui += round((end_price-start_price)*gushu,2)*HKhuilv\n else:\n allYingkui += round((end_price-start_price)*gushu,2)\n \n for key,value in dictMerged.items():\n try:\n #股数\n gushu = dic_position[key][0]\n #成本价\n chengben = dic_position[key][1]\n #行业分类\n hangye = dic_position[key][2]\n\n #now\n now = dictMerged[key]['now']\n #持仓市值\n chicang = round(gushu * dictMerged[key]['now'],2)\n #盈亏\n yingkui = round(gushu * dictMerged[key]['涨跌'],2)\n \n position_dic[key]=chicang\n \n# dictMerged[key]['行业']=hangye\n# dictMerged[key]['股数']=gushu\n# dictMerged[key]['持仓市值']=chicang\n# dictMerged[key]['盈亏']=yingkui\n# dictMerged[key]['总盈亏']=round((now-chengben)*gushu,2)\n# dictMerged[key]['总盈亏(%)']=str(round((now/chengben-1)*100,2)) + '%'\n #B股,港币的时候\n if dic_position[key][4]=='HK' :\n allPosition += chicang*HKhuilv\n allYingkui += round((now-chengben)*gushu,2)*HKhuilv\n todayYingkui += yingkui*HKhuilv\n else:\n allPosition += chicang\n allYingkui += round((now-chengben)*gushu,2)\n todayYingkui += yingkui\n\n# #行业分类\n# if hangye in dic_hangye_fenlei.keys():\n# dic_hangye_fenlei[hangye] += chicang\n# else:\n# dic_hangye_fenlei[hangye] = chicang\n except Exception as e:\n pass\n# #分类合计\n# for item in dic_shichang_fenlei:\n# dic_shichang_fenlei[item] = [dic_shichang_fenlei[item],str(round(dic_shichang_fenlei[item]*100/allPosition,2))+'%']\n# #行业分类\n# for item in 
dic_hangye_fenlei:\n#                 dic_hangye_fenlei[item] = [dic_hangye_fenlei[item],str(round(dic_hangye_fenlei[item]*100/allPosition,2))+'%']\n    #保存\n    leanDBAccess.saveNewPositionLeanCloud(allPosition,allYingkui,todayYingkui,position_dic)\n\n@engine.define\ndef getKezhuanzhai():\n    headers = {\n        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',\n        'Accept-Encoding': 'gzip, deflate, br',\n        'Accept-Language': 'zh-CN,zh;q=0.9,ja;q=0.8,zh-TW;q=0.7',\n        'Connection': 'keep-alive',\n        'Host': 'www.jisilu.cn',\n        'Upgrade-Insecure-Requests': '1',\n        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36',\n    }\n    url_cb_list = 'https://www.jisilu.cn/data/cbnew/cb_list/'\n    url_pre_list = 'https://www.jisilu.cn/data/cbnew/pre_list/'\n    cb_list = requests.get(url_cb_list,headers=headers)\n    pre_list = requests.get(url_pre_list,headers=headers)\n\n    leanDBAccess.saveKezhuanzhai(cb_list.text,pre_list.text)\n    \nif __name__ == '__main__':  # fixed typo: was '__mian__', which silently never matched\n#     getHangqingFromQQ()\n    pass", "sub_path": "cloud.py", "file_name": "cloud.py", "file_ext": "py", "file_size_in_byte": 15277, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "leancloud.Engine", "line_number": 22, "usage_type": "call"}, {"api_name": "app.app", "line_number": 22, "usage_type": "argument"}, {"api_name": "time.time", "line_number": 26, "usage_type": "call"}, {"api_name": "easyquotation.use", "line_number": 30, "usage_type": "call"}, {"api_name": "auto_trader.getPosition", "line_number": 35, "usage_type": "call"}, {"api_name": "auto_trader.getShizhiCha", "line_number": 72, "usage_type": "call"}, {"api_name": "auto_trader.checkFadingJiari", "line_number": 75, "usage_type": "call"}, {"api_name": "auto_trader.checkTradeTime", "line_number": 79, "usage_type": "call"}, {"api_name": "send_mail.send_mail", "line_number": 85, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 116, "usage_type": "call"}, {"api_name": "sqlite3API.fetchmany", "line_number": 128, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 186, "usage_type": "call"}, {"api_name": "urllib2.Request", "line_number": 218, "usage_type": "call"}, {"api_name": "urllib2.urlopen", "line_number": 219, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 222, "usage_type": "call"}, {"api_name": "pandas.compat.StringIO", "line_number": 222, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 227, "usage_type": "call"}, {"api_name": "sqlite3API.fetchmany", "line_number": 234, "usage_type": "call"}, {"api_name": "easyquotation.use", "line_number": 242, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 266, "usage_type": "call"}, {"api_name": "sqlite3API.truncate", "line_number": 268, "usage_type": "call"}, {"api_name": "sqlite3API.save", "line_number": 270, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 276, "usage_type": "call"}, {"api_name": "sqlite3API.fetchmany", "line_number": 282, "usage_type": "call"}, {"api_name": "sqlite3API.get_conn", "line_number": 290, "usage_type": "call"}, {"api_name": "sqlite3API.fetchmany", "line_number": 296, "usage_type": "call"}, {"api_name": "leanDBAccess.getNewPositionLeanCloud", "line_number": 316, "usage_type": "call"}, {"api_name": "leanDBAccess.getNewPositionHistoryLeanCloud", "line_number": 317, "usage_type": "call"}, {"api_name": 
"easyquotation.use", "line_number": 319, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 335, "usage_type": "attribute"}, {"api_name": "leanDBAccess.saveNewPositionLeanCloud", "line_number": 405, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 420, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 421, "usage_type": "call"}, {"api_name": "leanDBAccess.saveKezhuanzhai", "line_number": 423, "usage_type": "call"}]} +{"seq_id": "181639230", "text": "# import required libraries\nfrom vidgear.gears import NetGear\nimport cv2\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom threading import Thread\nimport time\nfrom multiprocessing import Process, current_process\nfrom scipy import signal\n\ndef plot_matplotlib():\n plt.ion() ## Note this correction\n fig = plt.figure()\n plt.axis([0, 1000, -4000, 4000])\n plt.axis()\n i = 0\n x = list()\n y = list()\n while True:\n print(dvy_mapped)\n\n x.append(i)\n y.append(V)\n #plt.scatter(i, dvy_mapped, s=3, alpha=1);\n plt.plot(x, y, 'g', linewidth=2.0)\n i += 1\n plt.show()\n plt.pause(0.0001)\n if i>1000:\n\n fig.clear()\n plt.draw()\n x = list()\n y = list()\n i = 0.0\ndef draw_axis(rook_image, zero_x, zero_y, W, width, numticks):\n cv2.line(rook_image, (int(width/numticks), 0),(int(width/numticks), W), color=(255, 255, 0), thickness=2)\n cv2.line(rook_image, (0, zero_y), (width, zero_y), color=(255, 255, 0), thickness=2)\n for i in range(numticks):\n cv2.line(rook_image, (int(i*width/numticks), 0), (int(i*width/numticks), W), color=(128, 128, 128), thickness=1)\n cv2.putText(rook_image, str(i-1), (int(i*width/numticks)+10, zero_y+20), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 0), 1)\n cv2.putText(rook_image, str(numticks-i-5), (zero_x+10, int(i*W/numticks)+20), cv2.FONT_HERSHEY_SIMPLEX, 0.4,\n (255, 255, 0), 1)\n cv2.line(rook_image, (0, int(i*W/numticks)), (width, int(i*W/numticks)), color=(128, 128, 128), thickness=1)\ndef plot(size):\n\n time_counter = 0\n delta_time = 0\n current_pixel = 0\n prvs_pixel = 0\n W = size\n width = int(W*1.9)\n scale_x = 500\n scale_y = 0.15\n size = W, width, 3\n zero_x = 60\n zero_y = int(W/2)\n rook_image = np.zeros(size, dtype=np.uint8)\n rook_window = \"Drawing 1: Rook{}\".format(size)\n prvs_dvy_mapped = dvy_mapped\n prvs_dvy_f = dvy_f\n draw_axis(rook_image, zero_x, zero_y, W, width, numticks=10)\n while True:\n start_time = time.time()\n time_counter += delta_time\n if current_pixel >= width:\n rook_image = np.zeros(size, dtype=np.uint8)\n time_counter = 0.0\n current_pixel = 0\n prvs_pixel = 0\n draw_axis(rook_image, zero_x, zero_y, W, width, numticks=10)\n else:\n current_pixel = int(scale_x * time_counter)\n cv2.line(rook_image, (prvs_pixel+zero_x, int(zero_y - scale_y * prvs_dvy_mapped)),\n (current_pixel+zero_x, int(zero_y - scale_y * dvy_mapped)), color=(0, 255, 0), thickness=1)\n cv2.line(rook_image, (prvs_pixel+zero_x, int(zero_y - scale_y * prvs_dvy_f)),\n (current_pixel+zero_x, int(zero_y - scale_y * dvy_f)), color=(0, 0, 255), thickness=1)\n prvs_dvy_mapped = dvy_mapped\n prvs_dvy_f = dvy_f\n\n\n prvs_pixel = int(scale_x * (time_counter-delta_time))\n cv2.imshow(rook_window, rook_image)\n delta_time = time.time() - start_time\n k = cv2.waitKey(1) & 0xff\n if k == 27:\n break\n\n\ndef my_line(img, start, end, thickness=2, line_type = 8):\n\n cv2.line(img,\n start,\n end,\n (0, 255, 0),\n thickness,\n line_type)\n\n\ndef my_line_red(img, start, end):\n thickness = 2\n line_type = 8\n cv2.line(img,\n start,\n end,\n (255, 255, 
0),\n             thickness,\n             line_type)\n\ndvy_mapped = 0.0\ndvy_f = 0.0\ndef stream():\n    global dvy_mapped\n    global dvy_f\n    flag_first_flow_frame = False\n    T_f = 0.15\n    dvy_f = 0.0\n    dt = 0.0\n    cap = cv2.VideoCapture(0)\n    while True:\n        st = time.time()\n        ret, frame = cap.read()\n\n        # {do something with the extracted frame and data here}\n        #ROI = frame[160:240, 0:320].copy()\n        ROI = frame[0:240, 0:320].copy()\n        if not flag_first_flow_frame:\n            prvs = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)\n\n            flag_first_flow_frame = True\n            pass\n\n        next = cv2.cvtColor(ROI, cv2.COLOR_BGR2GRAY)\n        #flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 1, 15, 1, 5, 1.2, 0)  # dead code: its result was immediately overwritten by the tuned call below\n        flow = cv2.calcOpticalFlowFarneback(prvs, next, None, pyr_scale=0.5, levels=5, winsize=11, iterations=5, poly_n=5, poly_sigma=1.1, flags=0)\n        dvx = -np.ma.average(flow[..., 0])\n        dvy = np.ma.average(flow[..., 1])\n        #my_line(frame, (160, 120), (160 + int((500 * dvx) // 10), 120 + int((500 * dvy) // 10)))\n        my_line(frame, (160, 120), (160, 120 + int((500 * dvy) // 10)))\n        #my_line_red(frame, (160, 120), (160 + int((500 * dvx) // 10), 120))\n        cv2.circle(frame, (160, 120), int((500 * abs(dvy)) // 10), (0, 255, 0), 2)\n        #cv2.circle(frame, (160, 120), int((500 * abs(dvx)) // 10), (255, 255, 0), 2)\n        prvs = next\n\n        dvy_mapped = dvy*500\n        dt = time.time() - st\n        dvy_f = ((dvy_mapped - dvy_f) * 1 / T_f * dt) + dvy_f\n\n        #V = signal.TransferFunction\n        #print(dvy_mapped)\n\n\n        #frame = cv2.resize(ROI, (320, 240), interpolation=cv2.INTER_AREA)\n        # let print received server data\n\n\n        # Show output window\n        cv2.imshow(\"Output Frame\", frame)\n        cv2.imshow(\"Output ROI\", ROI)\n        # check for 'q' key if pressed\n        key = cv2.waitKey(1) & 0xFF\n        if key == ord(\"q\"):\n            break\n\n\n    # close output window\n    cv2.destroyAllWindows()\n\n    # release the capture device; the original called client.close() on a NetGear client that is never created in this local version\n    cap.release()\n\n\nif __name__ == \"__main__\":\n    thread2 = Thread(target=stream, args=())\n    thread2.start()\n    time.sleep(2)\n    thread1 = Thread(target=plot, args=(500,))\n    thread1.start()\n\n\n\n    thread2.join()\n    thread1.join()\n", "sub_path": "LOCAL_HOST_video_client_1_0.py", "file_name": "LOCAL_HOST_video_client_1_0.py", "file_ext": "py", "file_size_in_byte": 5801, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.pyplot.ion", "line_number": 13, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 13, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 14, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 14, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 15, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 16, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 16, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.draw", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, 
{"api_name": "cv2.line", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 41, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 42, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 42, "usage_type": "attribute"}, {"api_name": "cv2.putText", "line_number": 43, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 43, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 59, "usage_type": "attribute"}, {"api_name": "time.time", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 68, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 68, "usage_type": "attribute"}, {"api_name": "cv2.line", "line_number": 75, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 77, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 84, "usage_type": "call"}, {"api_name": "time.time", "line_number": 85, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 86, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 93, "usage_type": "call"}, {"api_name": "cv2.line", "line_number": 104, "usage_type": "call"}, {"api_name": "cv2.VideoCapture", "line_number": 120, "usage_type": "call"}, {"api_name": "time.time", "line_number": 122, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 129, "usage_type": "attribute"}, {"api_name": "cv2.cvtColor", "line_number": 134, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 134, "usage_type": "attribute"}, {"api_name": "cv2.calcOpticalFlowFarneback", "line_number": 135, "usage_type": "call"}, {"api_name": "cv2.calcOpticalFlowFarneback", "line_number": 136, "usage_type": "call"}, {"api_name": "numpy.ma.average", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 137, "usage_type": "attribute"}, {"api_name": "numpy.ma.average", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.ma", "line_number": 138, "usage_type": "attribute"}, {"api_name": "cv2.circle", "line_number": 142, "usage_type": "call"}, {"api_name": "time.time", "line_number": 147, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 159, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 160, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 162, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 168, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 175, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 177, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 178, "usage_type": "call"}]} +{"seq_id": "290363473", "text": "import urllib\nimport urllib.request\nfrom bs4 import BeautifulSoup\n\n\nfrom tweepy import Stream\nfrom tweepy import OAuthHandler\nfrom tweepy.streaming import StreamListener\n\n\nckey=\"ESAHJglSve3sybJTGt7d3GSnN\"\ncsecret=\"jRzIueSYb8uH9pgCir6yImsi0jjovymRFrQRZzWuJnSTeslBSi\"\natoken=\"4849454360-qxhs8dJST6v1fiC7a3OFwxa6UY6OYwtGZ10w1LB\"\nasecret=\"mWqwshQTZoO7123yaxibvevRwv4ug5PGDnMT9MaWmV95V\"\n\n\ntheurl = \"http://trends24.in/india/~cloud\"\nthepage = urllib.request.urlopen(theurl)\nsoup = 
BeautifulSoup(thepage,\"html.parser\")\n\ntrend_list = []\nfor item in soup.findAll('ol', {\"class\":\"page-content__tagcloud__list\"}):\n for i in item.findAll('a'):\n #i = i.text.strip()\n trend_list.append(i.text.strip())\n\n\nclass listener(StreamListener):\n def on_data(self, data):\n print(data)\n saveFile = open('twitDB.csv', 'a')\n saveFile.write(data)\n saveFile.close()\n return(True)\n\n def on_error(self, status):\n print(status)\n\n\nauth = OAuthHandler(ckey, csecret)\nauth.set_access_token(atoken, asecret)\n\ntwitterStream = Stream(auth, listener())\ntwitterStream.filter(track=trend_list)", "sub_path": "1web scrapping and download tweets/trends_india_saveData.py", "file_name": "trends_india_saveData.py", "file_ext": "py", "file_size_in_byte": 1117, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "urllib.request.urlopen", "line_number": 18, "usage_type": "call"}, {"api_name": "urllib.request", "line_number": 18, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "tweepy.streaming.StreamListener", "line_number": 28, "usage_type": "name"}, {"api_name": "tweepy.OAuthHandler", "line_number": 40, "usage_type": "call"}, {"api_name": "tweepy.Stream", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "269397925", "text": "import logging\nfrom functools import wraps\nfrom urllib.parse import urljoin\n\nimport requests\nfrom bs4 import BeautifulSoup\nfrom crawler_utils import MongoCache\nfrom fake_useragent import FakeUserAgent\nfrom utils.redis_keys import HUPU_PAGE_INDEX_KEY\nfrom redis import Redis\nimport logging.handlers\n\nbase_video_url = \"https://bbs.hupu.com/4858-{}\"\nbase_url = \"https://bbs.hupu.com\"\n\n\ndef set_logger(logger_name):\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.DEBUG)\n handler = logging.handlers.RotatingFileHandler(logger_name, maxBytes=102400)\n # logger.addHandler(handler)\n return logger\n\n\nclass HupuVideoCrawler:\n def __init__(self, cache=None):\n self.user_agent = FakeUserAgent()\n self.headers = {\"user-agent\": self.user_agent.random}\n if cache:\n self.cache = cache\n else:\n self.cache = MongoCache(\n db_name=\"hupu_crawler\", username=\"root\", password=\"newpass\"\n )\n self.logger = set_logger(\"hupu_crawler\")\n self.redis_client = Redis()\n\n def mongo_cache(self, func):\n @wraps(func)\n def mongo_cached(*args, **kwargs):\n key = kwargs.get(\"key\")\n if key:\n collection_name = kwargs.get(\"collection_name\", \"default\")\n try:\n return self.cache.get(collection_name, key)\n except KeyError:\n pass\n return func\n\n return mongo_cached\n\n def get_video_html_messages(self, page_nums=50):\n \"\"\"\n get video_html_urls then parse get video urls\n example: https://bbs.hupu.com/4858-1\n :param page_nums:\n :return:\n \"\"\"\n\n def parse_list_html(html):\n \"\"\"\n zt对会跳舞的小姐姐毫无抵抗力\n :param html:\n :return:\n \"\"\"\n soup = BeautifulSoup(html, \"html.parser\")\n videos = soup.find_all(\"div\", {\"class\": \"titlelink box\"})\n\n for video in videos:\n yield (urljoin(base_url, video.a[\"href\"]), video.a.text)\n\n collection_name = \"video_html\"\n page_begin_index = int(self.redis_client.get(HUPU_PAGE_INDEX_KEY))\n if not page_begin_index:\n page_begin_index = 1\n\n for page_index in range(page_begin_index, page_nums):\n url = base_video_url.format(page_index)\n try:\n html = self.cache.get(collection_name=collection_name, key=url)\n print(self.cache.length(collection_name), 
\"length\")\n self.logger.info(msg=\"get html from cache: {}\".format(url))\n except KeyError:\n res = requests.get(url=url, timeout=10)\n html = res.text\n self.logger.warning(\n msg=\"cached does not exist, go request url:{} status_code: {}\".format(\n url, res.status_code\n )\n )\n self.cache.set(collection_name=collection_name, key=url, value=html)\n yield parse_list_html(html)\n # 更新Index值\n self.redis_client.set(HUPU_PAGE_INDEX_KEY, page_begin_index + page_nums)\n\n def get_video_messages(self, video_html_messages):\n \"\"\"\n parse video_messages and download video\n :param video_html_messages: tuple\n :return:\n \"\"\"\n collection_name = \"video_html_urls\"\n\n def get_video_url(video_html_url):\n res = requests.get(video_html_url, headers=self.headers)\n soup = BeautifulSoup(res.text, \"html.parser\")\n try:\n video_url = soup.find(\"video\")[\"src\"]\n except TypeError:\n self.logger.debug(\n u\"outlink can not be downloaded: video_html_url: {}\".format(\n video_html_url\n )\n .encode(\"utf-8\")\n .strip()\n )\n return\n self.logger.warning(\n \"cached does not exist, get video url:{} status_code: {}\".format(\n video_url, res.status_code\n )\n )\n return video_url\n\n for page_messages in video_html_messages:\n for video_html_url, video_title in page_messages:\n try:\n video_url = self.cache.get(collection_name, key=video_html_url)[\n \"video_url\"\n ]\n self.logger.info(\n msg=\"get video_url from cache: {} video_title: {}\".format(\n video_url, video_title\n )\n )\n except KeyError:\n video_url = get_video_url(video_html_url)\n if not video_url:\n continue\n self.cache.set(\n collection_name,\n video_html_url,\n {\"video_url\": video_url, \"video_title\": video_title},\n )\n yield video_url, video_title\n\n def download_videos(self, video_messages):\n \"\"\"\n get video_urls from video_messages and save video to local path\n :param video_messages:\n :return:\n \"\"\"\n collection_name = \"video_urls\"\n\n # save to local path\n\n def save(io_content, file_name):\n file_path = urljoin(\"/data/hupu_crawler/\", file_name + \".mp4\")\n with open(file_path, \"wb\") as opener:\n for chunk in io_content:\n opener.write(chunk)\n opener.flush()\n self.logger.warning(\"save video in local path {}\".format(file_path))\n\n for video_url, video_title in video_messages:\n try:\n self.cache.get(collection_name, video_url)\n self.logger.info(\"video_url {} exist in cache !\".format(video_url))\n except KeyError:\n video_messages = {\"video_url\": video_url, \"video_title\": video_title}\n self.cache.set(collection_name, video_url, video_messages)\n res = requests.get(video_url, headers=self.headers, stream=True)\n save(res.iter_content(chunk_size=1024), video_title)\n\n def main(self):\n # video_urls\n video_html_messages = self.get_video_html_messages()\n # get video_messages\n video_messages = self.get_video_messages(video_html_messages)\n # download videos and save to local path\n self.download_videos(video_messages)\n\n\nif __name__ == \"__main__\":\n hupu_crawler = HupuVideoCrawler(\n cache=MongoCache(\"root\", \"newpass\", host=\"127.0.0.1\", db_name=\"hupu\")\n )\n hupu_crawler.main()\n", "sub_path": "crawler/crawler_video.py", "file_name": "crawler_video.py", "file_ext": "py", "file_size_in_byte": 6857, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 19, "usage_type": "attribute"}, {"api_name": 
"logging.handlers.RotatingFileHandler", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.handlers", "line_number": 20, "usage_type": "attribute"}, {"api_name": "fake_useragent.FakeUserAgent", "line_number": 27, "usage_type": "call"}, {"api_name": "crawler_utils.MongoCache", "line_number": 32, "usage_type": "call"}, {"api_name": "redis.Redis", "line_number": 36, "usage_type": "call"}, {"api_name": "functools.wraps", "line_number": 39, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 66, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 70, "usage_type": "call"}, {"api_name": "utils.redis_keys.HUPU_PAGE_INDEX_KEY", "line_number": 73, "usage_type": "argument"}, {"api_name": "requests.get", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.redis_keys.HUPU_PAGE_INDEX_KEY", "line_number": 94, "usage_type": "argument"}, {"api_name": "requests.get", "line_number": 105, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 106, "usage_type": "call"}, {"api_name": "urllib.parse.urljoin", "line_number": 158, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 172, "usage_type": "call"}, {"api_name": "crawler_utils.MongoCache", "line_number": 186, "usage_type": "call"}]} +{"seq_id": "244242601", "text": "\n\n#########################################################################\n# Copyright Eduardo Fonseca 2018, v1.0\n# This software is distributed under the terms of the MIT License\n#\n# If you use this code or part of it, please cite the following paper:\n# Eduardo Fonseca, Manoj Plakal, Daniel P. W. Ellis, Frederic Font, Xavier Favory, Xavier Serra, \"Learning Sound Event\n# Classifiers from Web Audio with Noisy Labels\", in Proc. IEEE ICASSP 2019, Brighton, UK, 2019\n#\n#########################################################################\n\nimport os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\nfrom tqdm import tqdm, trange\nimport time\nimport pprint\nimport datetime\nimport argparse\nfrom scipy.stats import gmean\nimport yaml\n\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping, ReduceLROnPlateau\nimport tensorflow as tf\nfrom keras import backend as K\n\n\nimport utils\nfrom feat_ext import load_audio_file, get_mel_spectrogram, modify_file_variable_length\nfrom data import get_label_files, DataGeneratorPatch, PatchGeneratorPerFile, DataGeneratorPatchBinary, DataGeneratorFileFeatures\nfrom architectures import get_model_baseline, get_model_binary\nfrom eval import Evaluator\nfrom losses import lq_loss_wrap, crossentropy_max_wrap, crossentropy_outlier_wrap, crossentropy_reed_wrap,\\\n crossentropy_max_origin_wrap, crossentropy_outlier_origin_wrap, lq_loss_origin_wrap, crossentropy_reed_origin_wrap\n\n\ntarget_label = 3\npositive_threshold = 0.9\nadd_criterion = 9\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\nos.environ[\"HDF5_USE_FILE_LOCKING\"] = 'FALSE'\n\nmodel_path = 'model/generate_clean_data/thres_%.2f_crit_%d/label%d/' % (positive_threshold, add_criterion, target_label)\nrecord_path = 'record/generate_clean_data/thres_%.2f_crit_%d/' % (positive_threshold, add_criterion)\n\nif not os.path.exists(model_path):\n os.makedirs(model_path)\n\nif not os.path.exists(record_path):\n os.makedirs(record_path)\n\n\nstart = time.time()\n\nnow = datetime.datetime.now()\nprint(\"Current date and time:\")\nprint(str(now))\n\n# 
=========================================================================================================\n\n# ==================================================================== ARGUMENTS\nparser = argparse.ArgumentParser(description='Code for ICASSP2019 paper Learning Sound Event Classifiers from Web Audio'\n ' with Noisy Labels')\nparser.add_argument('-p', '--params_yaml',\n dest='params_yaml',\n action='store',\n required=False,\n type=str)\nargs = parser.parse_args()\nprint('\\nYaml file with parameters defining the experiment: %s\\n' % str(args.params_yaml))\n\n\n\n# =========================================================================Parameters, paths and variables\n# =========================================================================Parameters, paths and variables\n# =========================================================================Parameters, paths and variables\n\n# Read parameters file from yaml passed by argument\nparams = yaml.load(open('config/params.yaml'))\nparams_ctrl = params['ctrl']\nparams_extract = params['extract']\nparams_learn = params['learn']\nparams_loss = params['loss']\nparams_recog = params['recognizer']\n\nsuffix_in = params['suffix'].get('in')\nsuffix_out = params['suffix'].get('out')\n\n\nparams_extract['audio_len_samples'] = int(params_extract.get('fs') * params_extract.get('audio_len_s'))\n#\n\n# ======================================================== PATHS FOR DATA, FEATURES and GROUND TRUTH\n# where to look for the dataset\npath_root_data = params_ctrl.get('dataset_path')\n\nparams_path = {'path_to_features': os.path.join(path_root_data, 'features'),\n 'featuredir_tr': 'audio_train_varup2/',\n 'featuredir_te': 'audio_test_varup2/',\n 'path_to_dataset': path_root_data,\n 'audiodir_tr': 'FSDnoisy18k.audio_train/',\n 'audiodir_te': 'FSDnoisy18k.audio_test/',\n 'audio_shapedir_tr': 'audio_train_shapes/',\n 'audio_shapedir_te': 'audio_test_shapes/',\n 'gt_files': os.path.join(path_root_data, 'FSDnoisy18k.meta')}\n\n\nparams_path['featurepath_tr'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_tr'))\nparams_path['featurepath_te'] = os.path.join(params_path.get('path_to_features'), params_path.get('featuredir_te'))\n\nparams_path['audiopath_tr'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_tr'))\nparams_path['audiopath_te'] = os.path.join(params_path.get('path_to_dataset'), params_path.get('audiodir_te'))\n\nparams_path['audio_shapepath_tr'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_tr'))\nparams_path['audio_shapepath_te'] = os.path.join(params_path.get('path_to_dataset'),\n params_path.get('audio_shapedir_te'))\n\n\n# ======================================================== SPECIFIC PATHS TO SOME IMPORTANT FILES\n# ground truth, load model, save model, predictions, results\nparams_files = {'gt_test': os.path.join(params_path.get('gt_files'), 'test.csv'),\n 'gt_train': os.path.join(params_path.get('gt_files'), 'train.csv')}\n\n# # ============================================= print all params to keep record in output file\nprint('\\nparams_ctrl=')\npprint.pprint(params_ctrl, width=1, indent=4)\nprint('params_files=')\npprint.pprint(params_files, width=1, indent=4)\nprint('params_extract=')\npprint.pprint(params_extract, width=1, indent=4)\nprint('params_learn=')\npprint.pprint(params_learn, width=1, indent=4)\nprint('params_loss=')\npprint.pprint(params_loss, width=1, indent=4)\nprint('params_recog=')\npprint.pprint(params_recog, 
width=1, indent=4)\nprint('\\n')\n\n\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n# ============================================================== READ TRAIN and TEST DATA\n\n# aim: lists with all wav files for tr and te\ntrain_csv = pd.read_csv(params_files.get('gt_train'))\ntest_csv = pd.read_csv(params_files.get('gt_test'))\nfilelist_audio_tr = train_csv.fname.values.tolist()\nfilelist_audio_te = test_csv.fname.values.tolist()\n\n# get positions of manually_verified clips: separate between CLEAN and NOISY sets\nfilelist_audio_tr_flagveri = train_csv.manually_verified.values.tolist()\nidx_flagveri = [i for i, x in enumerate(filelist_audio_tr_flagveri) if x == 1]\nidx_flagnonveri = [i for i, x in enumerate(filelist_audio_tr_flagveri) if x == 0]\n\n# create list of ids that come from the noisy set\nnoisy_ids = [int(filelist_audio_tr[i].split('.')[0]) for i in idx_flagnonveri]\nparams_learn['noisy_ids'] = noisy_ids\n\n# get positions of clips of noisy_small subset\n# subset of the NOISY set of comparable size to that of CLEAN\nfilelist_audio_tr_nV_small_dur = train_csv.noisy_small.values.tolist()\nidx_nV_small_dur = [i for i, x in enumerate(filelist_audio_tr_nV_small_dur) if x == 1]\n\n# create dict with ground truth mapping with labels:\n# -key: path to wav\n# -value: the ground truth label too\nfile_to_label = {params_path.get('audiopath_tr') + k: v for k, v in\n zip(train_csv.fname.values, train_csv.label.values)}\n\n# ========================================================== CREATE VARS FOR DATASET MANAGEMENT\n# list with unique n_classes labels and aso_ids\nlist_labels = sorted(list(set(train_csv.label.values)))\nlist_aso_ids = sorted(list(set(train_csv.aso_id.values)))\n\n# create dicts such that key: value is as follows\n# label: int\n# int: label\nlabel_to_int = {k: v for v, k in enumerate(list_labels)}\nint_to_label = {v: k for k, v in label_to_int.items()}\n\n# create ground truth mapping with categorical values\nfile_to_int = {k: label_to_int[v] for k, v in file_to_label.items()}\n\n\nfile_name = record_path + '/iteration3.csv'\ntrain_csv_clean = pd.read_csv(file_name)\nbinary_labels = np.array(train_csv_clean[str(target_label)])\npositive_list = np.where(binary_labels==1)[0]\nnegative_list = np.where(binary_labels==0)[0]\n\n\nte_files = [f for f in os.listdir(params_path.get('featurepath_tr')) if f.endswith(suffix_in + '.data') and\n os.path.isfile(os.path.join(params_path.get('featurepath_tr'), f.replace(suffix_in, suffix_out)))]\n\nfor iteration in range(4, 5):\n # to store predictions\n te_preds = np.empty((len(te_files), 10))\n list_preds = []\n model_list = []\n for model_j in range(10):\n print('iteration:%d,model:%d'%(iteration,model_j))\n train_idx_neg = np.random.choice(negative_list, len(positive_list), replace=False)\n train_idx = list(positive_list) + list(train_idx_neg)\n ff_list_tr = [filelist_audio_tr[i].replace('.wav', suffix_in + '.data') for i in train_idx]\n labels_audio_train = np.concatenate((np.ones((len(positive_list), 1), dtype=np.float32), np.zeros((len(positive_list), 1), dtype=np.float32)), axis=0)\n\n\n # sanity check\n print('Number of clips considered as train set: {0}'.format(len(ff_list_tr)))\n print('Number of labels loaded for train set: {0}'.format(len(labels_audio_train)))\n\n # split the val set randomly (but 
stratified) within the train set\n tr_files, val_files = train_test_split(ff_list_tr,\n stratify=labels_audio_train,\n random_state=42\n )\n\n # to improve data generator\n tr_gen_patch = DataGeneratorPatchBinary(labels=labels_audio_train,\n feature_dir=params_path.get('featurepath_tr'),\n file_list=ff_list_tr,\n params_learn=params_learn,\n params_extract=params_extract,\n suffix_in='_mel',\n suffix_out='_label',\n floatx=np.float32\n )\n\n\n # ============================================================DEFINE AND FIT A MODEL\n # ============================================================DEFINE AND FIT A MODEL\n # ============================================================DEFINE AND FIT A MODEL\n\n tr_loss, val_loss = [0] * params_learn.get('n_epochs'), [0] * params_learn.get('n_epochs')\n # ============================================================\n if params_ctrl.get('learn'):\n\n model = get_model_binary(params_learn=params_learn, params_extract=params_extract)\n if iteration > 0:\n modelfile = os.path.join(model_path, 'model%d.h5' % model_j)\n model.load_weights(modelfile)\n\n opt = Adam(lr=params_learn.get('lr'))\n model.compile(optimizer=opt, loss='mean_squared_error', metrics=['accuracy'])\n\n # callbacks\n hist = model.fit_generator(tr_gen_patch,\n steps_per_epoch=tr_gen_patch.nb_iterations,\n epochs=params_learn.get('n_epochs'),\n class_weight=None,\n workers=4,\n verbose=2,\n )\n \n modelfile = os.path.join(model_path, 'model%d.h5' % model_j)\n model.save_weights(modelfile)\n# model_list.append(model)\n \n \n # ==================================================================================================== PREDICT\n # ==================================================================================================== PREDICT\n # ==================================================================================================== PREDICT\n\n\n te_gen_patch = PatchGeneratorPerFile(feature_dir=params_path.get('featurepath_tr'),\n file_list=te_files,\n params_extract=params_extract,\n suffix_in='_mel',\n floatx=np.float32,\n scaler=tr_gen_patch.scaler\n )\n \n for i in trange(len(te_files), miniters=int(len(te_files) / 100), ascii=True, desc=\"Predicting...\"):\n # return all patches for a sound file\n patches_file = te_gen_patch.get_patches_file()\n for model_j in range(10):\n modelfile = os.path.join(model_path, 'model%d.h5' % model_j)\n model.load_weights(modelfile)\n # predicting now on the T_F patch level (not on the wav clip-level)\n preds_patch_list = model.predict(patches_file).tolist()\n preds_patch = np.array(preds_patch_list)\n preds_file = np.mean(preds_patch, axis=0)\n \n te_preds[i, model_j] = preds_file \n \n K.clear_session()\n tf.reset_default_graph()\n \n pos_pred_valid = np.sum(te_preds >= positive_threshold, axis=1)\n add_index = np.where(pos_pred_valid >= add_criterion)[0]\n \n file_name = record_path + '/iteration%d.csv'%iteration\n if not os.path.exists(file_name): \n train_csv_clean = pd.read_csv('record/generate_clean_data/train.csv')\n else:\n train_csv_clean = pd.read_csv(file_name)\n binary_labels = np.array(train_csv_clean[str(target_label)])\n binary_labels[add_index] = 1\n train_csv_clean[str(target_label)] = binary_labels\n train_csv_clean.to_csv(file_name, index=False)\n positive_list = np.where(binary_labels==1)[0]\n negative_list = np.where(binary_labels==0)[0]\n \n \n", "sub_path": "generate_clean_data-Copy2.py", "file_name": "generate_clean_data-Copy2.py", "file_ext": "py", "file_size_in_byte": 14171, "program_lang": "python", "lang": 
"en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.environ", "line_number": 45, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 46, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 51, "usage_type": "call"}, {"api_name": "os.path", "line_number": 51, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 55, "usage_type": "call"}, {"api_name": "time.time", "line_number": 58, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 60, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 60, "usage_type": "attribute"}, {"api_name": "argparse.ArgumentParser", "line_number": 67, "usage_type": "call"}, {"api_name": "yaml.load", "line_number": 84, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 102, "usage_type": "call"}, {"api_name": "os.path", "line_number": 102, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 110, "usage_type": "call"}, {"api_name": "os.path", "line_number": 110, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 113, "usage_type": "call"}, {"api_name": "os.path", "line_number": 113, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 117, "usage_type": "call"}, {"api_name": "os.path", "line_number": 117, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 121, "usage_type": "call"}, {"api_name": "os.path", "line_number": 121, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 127, "usage_type": "call"}, {"api_name": "os.path", "line_number": 127, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 128, "usage_type": "call"}, {"api_name": "os.path", "line_number": 128, "usage_type": "attribute"}, {"api_name": "pprint.pprint", "line_number": 132, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 134, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 136, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 138, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 140, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 142, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 152, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 153, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 194, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 195, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 196, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 199, "usage_type": "call"}, {"api_name": "os.path.isfile", "line_number": 200, "usage_type": "call"}, {"api_name": "os.path", "line_number": 200, "usage_type": 
"attribute"}, {"api_name": "os.path.join", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.empty", "line_number": 204, "usage_type": "call"}, {"api_name": "numpy.random.choice", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 209, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 212, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 212, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 212, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 220, "usage_type": "call"}, {"api_name": "data.DataGeneratorPatchBinary", "line_number": 226, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 233, "usage_type": "attribute"}, {"api_name": "architectures.get_model_binary", "line_number": 245, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 247, "usage_type": "call"}, {"api_name": "os.path", "line_number": 247, "usage_type": "attribute"}, {"api_name": "keras.optimizers.Adam", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 262, "usage_type": "call"}, {"api_name": "os.path", "line_number": 262, "usage_type": "attribute"}, {"api_name": "data.PatchGeneratorPerFile", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 276, "usage_type": "attribute"}, {"api_name": "tqdm.trange", "line_number": 280, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 284, "usage_type": "call"}, {"api_name": "os.path", "line_number": 284, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 288, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 289, "usage_type": "call"}, {"api_name": "keras.backend.clear_session", "line_number": 293, "usage_type": "call"}, {"api_name": "keras.backend", "line_number": 293, "usage_type": "name"}, {"api_name": "tensorflow.reset_default_graph", "line_number": 294, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 297, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 300, "usage_type": "call"}, {"api_name": "os.path", "line_number": 300, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 301, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 304, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 308, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 309, "usage_type": "call"}]} +{"seq_id": "322918388", "text": "#-*- coding:utf-8 -*-\nfrom collections import Counter\nimport time\nimport pandas as pd\nimport numpy as np\nfrom wordcloud import WordCloud\nimport matplotlib.pyplot as plt\nfrom app01 import models, data_list\n\n\ndef avg(x):\n info = {}\n if x[-3] == '万': k = 10000;\n elif x[-3] == '千': k = 1000;\n else: k = 1;\n position = x.find('-')\n bottomSalary = 0\n topSalary = 0\n length = len(x)\n if position != -1: # 处理有‘···k-···k’的薪资上下限\n bottomSalary = float(x[:position]) * k\n topSalary = float(x[position + 1:length - 3]) * k\n if x[-1] == '年':\n bottomSalary = bottomSalary / 12\n topSalary = topSalary / 12\n elif x[-1] == '天':\n bottomSalary = bottomSalary * 30\n topSalary = topSalary * 30\n info['bottomSalary'] = bottomSalary/1000\n info['topSalary'] = 
topSalary/1000\n info['avgSalary'] = (bottomSalary + topSalary) / 2000\n\n return info\n\nclass HandleLagouData(object):\n def __init__(self):\n self.date = time.strftime(\"%Y-%m-%d\",time.localtime())\n\n def query_industryfield_result(self):\n info = {}\n data = models.Job.objects.all().values('companyind_text')\n data = [x['companyind_text'] for x in data if x['companyind_text'] != '']\n list = []\n for i in data:\n for j in i.split('/'):\n list.append(j)\n result_list1 = [x for x in list]\n result_list2 = [x for x in Counter(result_list1).items()]\n data = [{\"name\": x[0], \"value\": x[1]} for x in result_list2 if x[1]>4]\n name_list = [name['name'] for name in data]\n info['x_name'] = name_list\n info['data'] = data\n\n return info\n\n def get_joblist(self):\n data = models.Job.objects.all()\n data = [{\"job_href\": x.job_href, \"job_name\": x.job_name,\n \"companytype_text\": x.companytype_text, \"jobwelf_list\": x.jobwelf_list,\n \"degreefrom\": x.degreefrom, \"company_name\": x.company_name,\n \"workyear\": x.workyear, \"companysize_text\": x.companysize_text,\n \"companyind_text\": x.companyind_text, \"providesalary_text\": x.providesalary_text, }\n for x in data]\n\n #学历和工作年限数据转换\n for i in range(len(data)):\n data[i][\"degreefrom\"]=list(data_list.degreefrom_l.values())[list(data_list.degreefrom_l.keys()).index('0'+data[i]['degreefrom']+'0')]\n data[i][\"workyear\"]=list(data_list.workyear_l.values())[list(data_list.workyear_l.keys()).index('0'+data[i]['workyear']+'0')]\n\n return data\n\n # 查询薪资情况\n def query_salary_result(self):\n info = {}\n data = models.Job.objects.all().values('providesalary_text')\n data = [avg(x['providesalary_text']) for x in data if x['providesalary_text'] != '']\n name_list = []\n for i in range(len(data)):\n name_list.append(i+1)\n # 先用name再用value排序\n result1 = sorted(data, key=lambda x: (x['avgSalary'], x['bottomSalary'], x['topSalary']))\n name_list1 = [result1['topSalary'] for result1 in result1]\n name_list2 = [result1['bottomSalary'] for result1 in result1]\n name_list3 = [result1['avgSalary'] for result1 in result1]\n info['x_name'] = name_list\n info['top']= name_list1\n info['bottom'] = name_list2\n info['avg'] = name_list3\n #计算平均数\n s = 0\n for i in range(len(name_list3)):\n s += name_list3[i]\n s = s/len(name_list3)\n #计算众数\n maxkey_list = [x for x in Counter(name_list3).items()]\n maxkey = sorted(maxkey_list, key=lambda x: (x[1]), reverse=True)\n info['salary'] = {\n 'avg':s,\n 'num':maxkey[0],\n }\n\n return (info)\n\n\n # 查询工作年限情况\n def query_workyear_result(self):\n info = {}\n data = models.Job.objects.all().values('workyear')\n data = [x['workyear'] for x in data]\n data = np.char.replace(data, '','0')\n data = [list(data_list.workyear_l.values())[list(data_list.workyear_l.keys()).index(i)] for i in data]\n\n result = pd.Series(data)\n # 处理原始数据\n result_list1 = [x for x in result]\n # 计数,并返回\n result_list2 = [x for x in Counter(result_list1).items()]\n result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n name_list = [name['name'] for name in result]\n info['x_name'] = name_list\n info['data'] = result\n\n return info\n\n # 查询学历信息\n def query_education_result(self):\n info = {}\n data = models.Job.objects.all().values('degreefrom')\n data = [x['degreefrom'] for x in data ] #if x['degreefrom'] != '' ]\n data = np.char.replace(data, '','0')\n data = [list(data_list.degreefrom_l.values())[list(data_list.degreefrom_l.keys()).index(i)] for i in data]\n result = pd.Series(data)\n\n # 处理原始数据\n result_list1 = [x for x in 
result]\n        # 计数,并返回\n        result_list2 = [x for x in Counter(result_list1).items()]\n        result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n        name_list = [name['name'] for name in result]\n        info['x_name'] = name_list\n        info['data'] = result\n\n        return info\n\n    def query_city_result(self):\n        info = {}\n        data1 = models.Job.objects.all().values('workarea_text','providesalary_text')\n        data = [[x['workarea_text'], x['providesalary_text']] for x in data1 if x['workarea_text'] != '异地招聘']\n        list = []\n        for i in data:\n            x = 0\n            for j in i[0].split('-'):\n                if x == 0:\n                    list.append(j)\n                    x = 2\n        result_list2 = [x for x in Counter(list).items()]\n\n        list2 = []\n        for i in result_list2:\n            all_s = [avg(x['providesalary_text']) for x in data1 if\n                     x['workarea_text'] == i[0] and x['providesalary_text'] != '']\n            name_list3 = [x['avgSalary'] for x in all_s]\n            if len(name_list3) != 0:\n                # 计算平均数\n                s = 0\n                for j in range(len(name_list3)):\n                    s += name_list3[j]\n                s = s / len(name_list3)\n                list2.append([i[0], i[1], round(s, 2)])\n\n        info['x_name_city'] = [{\"name\": x[0], \"value\": x[1]} for x in list2]\n        info['x_name_avg'] = [{\"name\": x[0], \"value\": x[2]} for x in list2]\n        info['x_name'] = [name['name'] for name in info['x_name_city']]\n\n        return info\n    #融资情况\n    def query_financestage_result(self):\n        info = {}\n        data = models.Job.objects.all().values('companytype_text')\n        data = [x['companytype_text'] for x in data if x['companytype_text'] != '']\n        result = pd.Series(data)\n        # 处理原始数据\n        result_list1 = [x for x in result ]\n        # 计数,并返回\n        result_list2 = [x for x in Counter(result_list1).items()]\n        result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n        name_list = [name['name'] for name in result]\n        info['x_name'] = name_list\n        info['data'] = result\n        return info\n\n    # 公司规模\n    def query_companysize_result(self):\n        info = {}\n        data = models.Job.objects.all().values('companysize_text')\n        data = [x['companysize_text'] for x in data if x['companysize_text'] != '']\n        result = pd.Series(data)\n        # 处理原始数据\n        result_list1 = [x for x in result]\n        # 计数,并返回\n        result_list2 = [x for x in Counter(result_list1).items()]\n        result = [{\"name\": x[0], \"value\": x[1]} for x in result_list2]\n        name_list = [name['name'] for name in result]\n        info['x_name'] = name_list\n        info['data'] = result\n        return info\n\n    #公司标签\n    def query_tag_result(self):\n        info = {}\n        data = models.Job.objects.all().values('jobwelf_list')  # self.key_word was never initialized, so query all rows like the sibling methods\n        data = [x['jobwelf_list'] for x in data if x['jobwelf_list'] != '']\n        result = pd.Series(data)\n        # 处理原始数据\n        data1 = str(data).replace(\"\\\"\", \"\").replace(\" \", \"\") \\\n            .replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\").replace(\",,,\", \",\")\n        wordcloud = WordCloud(font_path='msyh.ttc', height=700, width=1000, background_color='white')\n        wordcloud.generate(data1)\n        plt.imshow(wordcloud)\n        wordcloud.to_file('static/img/all_key.jpg')\n        plt.axis(\"off\")\n        return data1\n\ndata_a = HandleLagouData()\n", "sub_path": "app01/data_all.py", "file_name": "data_all.py", "file_ext": "py", "file_size_in_byte": 8691, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "time.strftime", "line_number": 37, "usage_type": "call"}, {"api_name": "time.localtime", "line_number": 37, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 41, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 41, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 
41, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 48, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 57, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 57, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 57, "usage_type": "name"}, {"api_name": "app01.data_list.degreefrom_l.values", "line_number": 67, "usage_type": "call"}, {"api_name": "app01.data_list.degreefrom_l", "line_number": 67, "usage_type": "attribute"}, {"api_name": "app01.data_list", "line_number": 67, "usage_type": "name"}, {"api_name": "app01.data_list.degreefrom_l.keys", "line_number": 67, "usage_type": "call"}, {"api_name": "app01.data_list.workyear_l.values", "line_number": 68, "usage_type": "call"}, {"api_name": "app01.data_list.workyear_l", "line_number": 68, "usage_type": "attribute"}, {"api_name": "app01.data_list", "line_number": 68, "usage_type": "name"}, {"api_name": "app01.data_list.workyear_l.keys", "line_number": 68, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 75, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 75, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 75, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 95, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 108, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 108, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 108, "usage_type": "name"}, {"api_name": "numpy.char.replace", "line_number": 110, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 110, "usage_type": "attribute"}, {"api_name": "app01.data_list.workyear_l.values", "line_number": 111, "usage_type": "call"}, {"api_name": "app01.data_list.workyear_l", "line_number": 111, "usage_type": "attribute"}, {"api_name": "app01.data_list", "line_number": 111, "usage_type": "name"}, {"api_name": "app01.data_list.workyear_l.keys", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 113, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 117, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 128, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 128, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 128, "usage_type": "name"}, {"api_name": "numpy.char.replace", "line_number": 130, "usage_type": "call"}, {"api_name": "numpy.char", "line_number": 130, "usage_type": "attribute"}, {"api_name": "app01.data_list.degreefrom_l.values", "line_number": 131, "usage_type": "call"}, {"api_name": "app01.data_list.degreefrom_l", "line_number": 131, "usage_type": "attribute"}, {"api_name": "app01.data_list", "line_number": 131, "usage_type": "name"}, {"api_name": "app01.data_list.degreefrom_l.keys", "line_number": 131, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 132, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 137, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 147, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 147, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 147, "usage_type": "name"}, {"api_name": "collections.Counter", "line_number": 156, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 179, "usage_type": 
"call"}, {"api_name": "app01.models.Job", "line_number": 179, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 179, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 181, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 185, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.all", "line_number": 195, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 195, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 195, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 197, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 201, "usage_type": "call"}, {"api_name": "app01.models.Job.objects.filter", "line_number": 211, "usage_type": "call"}, {"api_name": "app01.models.Job", "line_number": 211, "usage_type": "attribute"}, {"api_name": "app01.models", "line_number": 211, "usage_type": "name"}, {"api_name": "pandas.Series", "line_number": 213, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 217, "usage_type": "call"}, {"api_name": "wordcloud.generate", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 219, "usage_type": "name"}, {"api_name": "wordcloud.to_file", "line_number": 220, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}]} +{"seq_id": "212301715", "text": "import json, re\nfrom datetime import datetime\n\n\nclass JSONParser():\n\n def __init__ (self, path):\n with open(path, encoding='utf-8') as j:\n self.post = json.load(j)['GraphImages'] \n self.length = len(self.post) \n self.account_name = self.post[0]['username']\n self.shortcodes = [self.get_shortcode(i) for i in range(self.length)]\n\n def get_timestamp(self,idx):\n timestamp = self.post[idx]['taken_at_timestamp']\n return datetime.fromtimestamp(timestamp)\n\n def get_caption(self,idx):\n try:\n caption = self.post[idx]['edge_media_to_caption']['edges'][0]['node']['text']\n return caption\n \n except KeyError:\n caption = self.post[idx]['GraphImages'][idx]['comments']['data']\n return caption\n\n except IndexError:\n return None\n\n def get_owner(self,idx):\n owner = self.post[idx]['username']\n return owner\n\n def get_likesnum(self,idx):\n like = self.post[idx]['edge_media_preview_like']['count']\n return like\n\n def get_commentsnum(self,idx):\n comments = self.post[idx]['edge_media_to_comment']['count']\n return comments\n\n def get_videoview(self,idx):\n if self.post[idx]['is_video']:\n return self.post[idx]['video_view_count']\n else:\n return None\n\n def get_shortcode(self,idx): \n return self.post[idx]['shortcode']\n\n def get_post(self,idx):\n post = {'shortcode': self.get_shortcode(idx),\n 'caption':self.get_caption(idx),\n 'comments':self.get_commentsnum(idx), \n 'likes': self.get_likesnum(idx),\n 'tag_num':len(self.get_tagged(idx)), \n 'video_viewer':self.get_videoview(idx),\n 'time_posted':self.get_timestamp(idx)}\n comments = self.get_chunk(idx)\n return {'post': post, 'comments':comments}\n\n def get_chunk(self,idx):\n comments_list = []\n chunk = self.post[idx]['comments']['data']\n for comment in chunk:\n timestamp = comment['created_at']\n owner = comment['owner']['username']\n text = comment['text']\n comments_list.append({\n 'time_created': datetime.fromtimestamp(timestamp),\n 
\n", "sub_path": "jsonparser.py", "file_name": "jsonparser.py", "file_ext": "py", "file_size_in_byte": 2556, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.load", "line_number": 9, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 70, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 70, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 77, "usage_type": "call"}]} +{"seq_id": "108495567", "text": "# Author : Robert Geraghty\n# Contact : robertrgeraghty@gmail.com\n# Date : Feb 16, 2020\n\nimport random\nimport time\nimport numpy as np\n\ntry:\n    from CS5313_Localization_Env import maze\nexcept ImportError:\n    print(\n        'Problem finding CS5313_Localization_Env.maze... Trying to \"import maze\" only...'\n    )\n    try:\n        import maze\n\n        print(\"Successfully imported maze\")\n    except Exception as ex:\n        print(\"Could not import maze\")\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\n        print(ex)\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\ntry:\n    from CS5313_Localization_Env import RobotLocalization as viz\nexcept ImportError:\n    print(\n        'Problem finding CS5313_Localization_Env.RobotLocalization... Trying to \"import RobotLocalization\" only...'\n    )\n    try:\n        import RobotLocalization as viz\n\n        print(\"Successfully imported RobotLocalization\")\n    except Exception as ex:\n        print(\"Could not import RobotLocalization\")\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\n        print(ex)\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\ntry:\n    from CS5313_Localization_Env import localization_env as le\nexcept ImportError:\n    print(\n        'Problem finding CS5313_Localization_Env.localization_env... Trying to \"import localization_env\" only...'\n    )\n    try:\n        import localization_env as le\n\n        print(\"Successfully imported localization_env\")\n    except Exception as ex:\n        print(\"Could not import localization_env\")\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\n        print(ex)\n        print(\"----->LOOK HERE FOR EXCEPTION MESSAGE<-----\")\nfrom enum import Enum\n\n\n# Change this to True to print out information on the robot location and heading\nprintouts = True\n# Change this to True in order to print out the map as a dataframe to console every time move() is called, as well as the Transition Tables to csv files named \"heading.csv\" and \"location.csv\". Won't do anything if printouts is False, except import pandas\ndf = False\nif df:\n    from pandas import DataFrame\n\n\nclass Directions(Enum):\n    \"\"\"An Enum containing the directions S, E, N, W, and St (stationary) and their respective (x, y) movement tuples. Ex. 
S = (0, 1) meaning down one row, and stationary in the columns.\"\"\"\n\n    S = (0, 1)\n    E = (1, 0)\n    N = (0, -1)\n    W = (-1, 0)\n    St = (0, 0)\n\n    def get_ortho(self, value):\n        \"\"\" Return the Direction Enums orthogonal to the given direction\n\n        Arguments:\\n\n        value -- The given direction on which the orthogonal directions are based.\\n\n\n        Returns:\\n\n        A list of directions orthogonal to the given direction.\n        \"\"\"\n        if value in [self.N, self.S]:\n            return [self.W, self.E]\n        return [self.N, self.S]\n\n\nclass Headings(Enum):\n    \"\"\"An enum containing the headings S, E, N, W and their respective (x, y) movement tuples\"\"\"\n\n    S = (0, 1)\n    E = (1, 0)\n    N = (0, -1)\n    W = (-1, 0)\n\n    def get_ortho(self, value):\n        \"\"\" Return the Headings Enums orthogonal to the given heading\n\n        Arguments:\\n\n        value -- The given heading on which the orthogonal headings are based.\\n\n\n        Returns:\\n\n        A list of headings orthogonal to the given heading.\n        \"\"\"\n        if value in [self.N, self.S]:\n            return [self.W, self.E]\n        return [self.N, self.S]\n\n\nclass Environment:\n    \"\"\" An environment for testing a randomly moving robot around a maze.\n\n    Important Class Variables\\n\n    map -- The map of the maze. A 2d list of lists in the form list[x][y] where a value of 1 signifies there is a wall, 0 signifies the cell is traversable, and 'x' denotes the robot location.\\n\n    location_transitions -- The table of transition probabilities for each cell. Format is [x][y][heading][direction] which will return the probability of moving in the given direction, given the robot's current x, y, and heading.\\n\n    heading_transitions -- The table of transition probabilities for the headings given each cell. Format is [x][y][heading][heading] which will return the probabilities of each heading for the next time step given the robot's current x, y, and heading.\\n\n    robot_location -- The current location of the robot, given as a tuple in the form (x, y).\n    robot_heading -- The current heading of the robot, given as a Headings enum.\n    \"\"\"\n\n    def __init__(\n        self,\n        action_bias,\n        observation_noise,\n        action_noise,\n        dimensions,\n        seed=None,\n        window_size=[750, 750],\n    ):\n        \"\"\"Initializes the environment. The robot starts in a random traversable cell.\n\n        Arguments:\\n\n        action_bias -- Provides a bias for the robot's actions. Positive values increase the likelihood of South and East movements, negative values favor North and West. (float in range -1 to 1)\\n\n        observation_noise -- The probability that any given observation value will flip erroneously. (float in range 0-1)\\n\n        action_noise -- The probability that an action will move in either direction perpendicular to the intended direction. (float in range 0-1)\\n\n        dimensions -- The dimensions of the map, given in the form (x,y). (tuple in range (1+, 1+))\\n\n        seed (optional) -- The random seed value. (int) If None, a random seed is chosen.\\n\n        window_size(optional) -- The [x, y] size of the display. Default is [750, 750]. 
Should be the same aspect ratio as the maze to avoid strange looking graphics.\n\n        Return:\\n\n        No return\n        \"\"\"\n        # the pygame state\n        self.running = True\n\n        # Step counter\n        self.steps = 0\n\n        # save the bias, noise, and map size parameters\n        self.action_bias = action_bias\n        self.observation_noise = observation_noise\n        self.action_noise = action_noise\n        self.dimensions = dimensions\n\n        # set the random seed and display it\n        self.seed = seed if seed is not None else random.randint(1, 10000)\n        random.seed(self.seed)\n\n        # create the map and list of free cells\n        self.map = maze.make_maze(dimensions[0], dimensions[1], seed)\n        self.free_cells = [\n            (x, y)\n            for x in range(dimensions[0])\n            for y in range(dimensions[1])\n            if self.map[x][y] == 0\n        ]\n\n        # create the transition tables\n        self.location_transitions = self.create_locations_table()\n        self.headings_transitions = self.create_headings_table()\n\n        if df:\n            DataFrame(self.location_transitions).transpose().to_csv(\"location.csv\")\n            DataFrame(self.headings_transitions).transpose().to_csv(\"heading.csv\")\n\n        # set the robot location and print\n        self.robot_location = self.free_cells[\n            random.randint(0, len(self.free_cells) - 1)\n        ]\n\n        self.location_priors, self.heading_priors = self.compute_prior_probabilities()\n        self.observation_tables = self.create_observation_tables()\n        self.map[self.robot_location[0]][self.robot_location[1]] = \"x\"\n\n        # Set the robot heading\n        self.robot_heading = random.choice(\n            [\n                h\n                for h in Headings\n                if self.traversable(self.robot_location[0], self.robot_location[1], h)\n            ]\n        )\n\n        # gen initial headings probs\n        probs = {}\n        # prob_sum = 0\n        for h in le.Headings:\n            # num = random.random()\n            probs[h] = 1\n            # prob_sum += num\n        # for h in le.Headings:\n        #     probs[h] /= prob_sum\n\n        # init viz\n        self.window_size = window_size\n        self.game = viz.Game()\n        self.game.init_pygame(self.window_size)\n        self.game.update(\n            self.map,\n            self.robot_location,\n            self.robot_heading,\n            [[0] * self.dimensions[1]] * self.dimensions[0],\n            probs,\n        )\n        self.game.display()\n        print('Location Probs', [[0] * self.dimensions[1]] * self.dimensions[0])\n        print('Heading Probs', probs)\n\n        if printouts:\n            print(\"Random seed:\", self.seed)\n            print(\"Robot starting location:\", self.robot_location)\n            print(\"Robot starting heading:\", self.robot_heading)\n\n        if df:\n            print(DataFrame(self.map).transpose())\n\n    def compute_prior_probabilities(self):\n        location_priors = {}\n        for cell in self.free_cells:\n            location_priors[cell] = 1 / len(self.free_cells)\n\n        heading_priors = {}\n        for heading in Headings:\n            heading_priors[heading] = 0\n            for cell in self.free_cells:\n                for heading2 in Headings:\n                    heading_priors[heading] += self.headings_transitions[cell[0]][\n                        cell[1]\n                    ][heading2][heading]\n            heading_priors[heading] /= len(self.free_cells) * 4\n\n        return location_priors, heading_priors\n\n    def random_dictionary_sample(self, probs):\n        # inverse-CDF sampling: draw u ~ U[0,1) and return the first key whose\n        # cumulative probability exceeds it\n        sample = random.random()\n        prob_sum = 0\n        for key in probs.keys():\n            prob_sum += probs[key]\n            if prob_sum > sample:\n                return key
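\n\n    # Editor's worked example (illustration only): with probs = {N: 0.2, E: 0.8} and\n    # sample = 0.5, the running sum reaches 0.2 (no), then 1.0 > 0.5, so E is returned.\n    # If floating-point error leaves the total slightly below sample, the loop falls\n    # through and the method returns None, so callers rely on probs summing to 1.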
\n\n    def move(self):\n        \"\"\"Updates the robot's heading and moves the robot to a new position based on the transition table and its current location and new heading. Takes no arguments; the display is redrawn separately via update().\n\n        Return:\\n\n        A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. The order of the list is [S, E, N, W]\n        \"\"\"\n\n        # get the new location\n        self.map[self.robot_location[0]][self.robot_location[1]] = 0\n        probs = self.location_transitions[self.robot_location[0]][\n            self.robot_location[1]\n        ][self.robot_heading]\n\n        direction = self.random_dictionary_sample(probs)\n\n        self.robot_location = (\n            self.robot_location[0] + direction.value[0],\n            self.robot_location[1] + direction.value[1],\n        )\n\n        self.map[self.robot_location[0]][self.robot_location[1]] = \"x\"\n\n        # Get the new heading\n        h_probs = self.headings_transitions[self.robot_location[0]][\n            self.robot_location[1]\n        ][self.robot_heading]\n        self.robot_heading = self.random_dictionary_sample(h_probs)\n\n        # # get the new location\n        # self.map[self.robot_location[0]][self.robot_location[1]] = 0\n        # probs = self.location_transitions[self.robot_location[0]][\n        #     self.robot_location[1]\n        # ][self.robot_heading]\n\n        self.steps += 1\n        # return the new observation\n        if printouts:\n            print()\n            print(\n                \"---------------------------Steps: \"\n                + str(self.steps)\n                + \" ---------------------------------\"\n            )\n            print(self.robot_location)\n            print(self.robot_heading)\n            print(direction)\n            if df:\n                print(DataFrame(self.map).transpose())\n\n        # if self.running:\n        #     self.game.update(\n        #         self.map,\n        #         self.robot_location,\n        #         self.robot_heading,\n        #         location_probs,\n        #         headings_probs,\n        #     )\n        #     self.running = self.game.display()\n        # else:\n        #     print(\"Pygame closed. Quitting...\")\n        #     self.game.quit()\n\n        return self.observe()\n\n    def update(self, location_probs, headings_probs):\n        \"\"\"Redraws the visualization with the caller's current belief state.\n\n        Arguments:\\n\n        location_probs: The probability of the robot being in any (x, y) cell in the map. Created from your project code. Format list[x][y] = float\\n\n        headings_probs: The probability of the robot's current heading being any given heading. Created from your project code. Format dict{Headings enum : float, ...}\n        \"\"\"\n        if self.running:\n            self.game.update(\n                self.map,\n                self.robot_location,\n                self.robot_heading,\n                location_probs,\n                headings_probs,\n            )\n            self.running = self.game.display()\n        else:\n            print(\"Pygame closed. Quitting...\")\n            self.game.quit()\n\n    def observe(self):\n        \"\"\"Observes the walls at the current robot location\n\n        Return:\\n\n        A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. 
The order of the list is [S, E, N, W]\n        \"\"\"\n        # get the neighboring walls to create the true observation table\n        observations = [\n            0\n            if self.traversable(\n                self.robot_location[0], self.robot_location[1], direction\n            )\n            else 1\n            for direction in Directions\n            if direction != Directions.St\n        ]\n        # apply observation noise\n        observations = [\n            1 - x if random.random() < self.observation_noise else x\n            for x in observations\n        ]\n        return observations\n\n    def create_observation_tables(self):\n        observation_table = []\n        for x in range(self.dimensions[0]):\n            observation_table.append({})\n            for y in range(self.dimensions[1]):\n                if self.map[x][y] == 1:\n                    observation_table[x][y] = -1\n                    continue\n\n                observation_table[x][y] = {}\n\n                observations = [\n                    0\n                    if self.traversable(\n                        x, y, direction\n                    )\n                    else 1\n                    for direction in Directions\n                    if direction != Directions.St\n                ]\n\n                # P(potential_obs | true observations) assumes each of the four\n                # readings flips independently with probability observation_noise\n                for a in [0, 1]:\n                    for b in [0, 1]:\n                        for c in [0, 1]:\n                            for d in [0, 1]:\n                                potential_obs = (a, b, c, d)\n                                num_wrong = 0\n                                for i in range(len(observations)):\n                                    if observations[i] != potential_obs[i]:\n                                        num_wrong += 1\n                                prob = (1 - self.observation_noise) ** (len(\n                                    observations\n                                )-num_wrong) * self.observation_noise ** num_wrong\n                                observation_table[x][y][potential_obs] = prob\n        return observation_table\n\n    def create_locations_table(self):\n        temp = []\n        # loop through the x dim\n        for x in range(self.dimensions[0]):\n            temp.append([])\n            # loop through the y dim\n            for y in range(self.dimensions[1]):\n                # If the cell is not traversable then set its value in the transition table to -1\n                if self.map[x][y] == 1:\n                    temp[x].append(-1)\n                    continue\n                temp[x].append({})\n                for heading in list(Headings):\n                    probs = {}\n\n                    # Compute transition probabilities ignoring walls\n                    for direction in Directions:\n                        if direction.name == heading.name:\n                            probs[direction] = 1 - self.action_noise\n                        elif direction in Directions.get_ortho(\n                            Directions, Directions[heading.name]\n                        ):\n                            probs[direction] = self.action_noise / 2\n                        else:\n                            probs[direction] = 0\n                    # init stationary probability\n                    probs[Directions.St] = 0
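\n                    # Editor's example (illustration only): with action_noise = 0.2 and\n                    # heading N, this yields N: 0.8, W: 0.1, E: 0.1, S: 0 before walls.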
\n\n                    # account for walls. If there is a wall in one of the transition directions, add that probability to the stationary probability and set the transition probability to 0\n                    for direction in Directions:\n                        if not self.traversable(x, y, direction):\n                            probs[Directions.St] += probs[direction]\n                            probs[direction] = 0\n\n                    # add the new transition probabilities\n                    temp[x][y].update({heading: probs})\n        return temp\n\n    def create_headings_table(self):\n        temp = []\n        # loop through the x dim\n        for x in range(self.dimensions[0]):\n            temp.append([])\n            # loop through the y dim\n            for y in range(self.dimensions[1]):\n                # If the cell is not traversable then set its value in the transition table to -1\n                if self.map[x][y] == 1:\n                    temp[x].append(-1)\n                    continue\n                temp[x].append({})\n\n                for heading in Headings:\n                    probs = {}\n                    # Handle case when the current heading is traversable\n                    if self.traversable(x, y, heading):\n                        for new_heading in Headings:\n                            if heading == new_heading:\n                                probs[new_heading] = 1\n                            else:\n                                probs[new_heading] = 0\n                        temp[x][y].update({heading: probs})\n                        continue\n\n                    # If the current heading is not traversable\n\n                    # Find which headings are available\n                    headings_traversablity = {}\n                    for new_heading in Headings:\n                        if self.traversable(x, y, new_heading):\n                            headings_traversablity[new_heading] = 1\n                        else:\n                            headings_traversablity[new_heading] = 0\n\n                    # Sum these values for later arithmetic\n                    total_traversable = sum(list(headings_traversablity.values()))\n                    se_traversable = (\n                        headings_traversablity[Headings.S]\n                        + headings_traversablity[Headings.E]\n                    )\n                    nw_traversable = (\n                        headings_traversablity[Headings.N]\n                        + headings_traversablity[Headings.W]\n                    )\n\n                    # Compute the heading probabilities for traversable headings\n                    for new_heading in Headings:\n                        if self.traversable(x, y, new_heading):\n                            if new_heading in [Headings.S, Headings.E]:\n                                probs[new_heading] = (\n                                    1 / total_traversable\n                                    + self.action_bias / se_traversable\n                                )\n\n                            else:\n                                probs[new_heading] = (\n                                    1 / total_traversable\n                                    - self.action_bias / nw_traversable\n                                )\n                        else:\n                            probs[new_heading] = 0\n\n                    # normalize heading probabilities\n                    probs_sum = sum([probs[x] for x in Headings])\n                    for h in Headings:\n                        probs[h] /= probs_sum\n\n                    # add the new transition probabilities\n                    temp[x][y].update({heading: probs})\n        return temp\n\n    def traversable(self, x, y, direction):\n        \"\"\"\n        Returns true if the cell to the given direction of (x,y) is traversable, otherwise returns false.\n\n        Arguments:\\n\n        x -- the x coordinate of the initial cell\\n\n        y -- the y coordinate of the initial cell\\n\n        direction -- the direction of the cell to check for traversability. Type: localization_env.Directions enum or localization_env.Headings\\n\n\n        Return:\\n\n        A boolean signifying whether the cell to the given direction is traversable or not\n        \"\"\"\n        # see if the cell in the direction is traversable. 
The if statement handles out-of-bounds errors\n        if (\n            x + direction.value[0] >= 0\n            and x + direction.value[0] < self.dimensions[0]\n            and y + direction.value[1] >= 0\n            and y + direction.value[1] < self.dimensions[1]\n        ):\n            if self.map[x + direction.value[0]][y + direction.value[1]] == 0:\n                return True\n        return False\n\n    def dummy_location_and_heading_probs(self):\n        \"\"\"\n        Returns a dummy location probability table and a dummy heading probability dictionary for testing purposes\n\n        Returns:\\n\n        location probability table: Format is list[x][y] = float between (0-1)\\n\n        Headings probability table: Format is dict{Headings enum : float between (0-1)}\n        \"\"\"\n\n        loc_probs = list()\n        sum_probs = 0\n        for x in range(self.dimensions[0]):\n            loc_probs.append([])\n            for y in range(self.dimensions[1]):\n                if self.map[x][y] == 1:\n                    loc_probs[x].append(0.0)\n                else:\n                    num = random.random()\n                    loc_probs[x].append(num)\n                    sum_probs += num\n        for x in range(self.dimensions[0]):\n            for y in range(self.dimensions[1]):\n                loc_probs[x][y] /= sum_probs\n\n        hed_probs = {}\n        sample = np.random.rand(4)\n        sample = (sample / np.sum(sample)).tolist()\n        i = 0\n        for heading in le.Headings:\n            hed_probs[heading] = sample[i]\n            i += 1\n\n        return loc_probs, hed_probs\n\n\nif __name__ == \"__main__\":\n    print(\"TEST\")\n    env = Environment(0.1, 0.1, 0.2, (10, 10), window_size=[1000, 1000])\n    # print(\"Starting test. Press Enter to make move\")\n    location, heading = env.dummy_location_and_heading_probs()\n\n    done = False\n    while env.running:\n\n        # move() takes no arguments; the dummy beliefs are passed to update() instead\n        observation = env.move()\n        env.update(location, heading)\n\n        if printouts:\n            print(observation)\n        time.sleep(0.25)\n\n", "sub_path": "Project 2/Code/CS5313_Localization_Env/localization_env.py", "file_name": "localization_env.py", "file_ext": "py", "file_size_in_byte": 22270, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "enum.Enum", "line_number": 68, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 91, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 159, "usage_type": "call"}, {"api_name": "random.seed", "line_number": 160, "usage_type": "call"}, {"api_name": "maze.make_maze", "line_number": 163, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 176, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 177, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 181, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 189, "usage_type": "call"}, {"api_name": "localization_env.Headings", "line_number": 200, "usage_type": "attribute"}, {"api_name": "RobotLocalization.Game", "line_number": 209, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 228, "usage_type": "call"}, {"api_name": "random.random", "line_number": 248, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 306, "usage_type": "call"}, {"api_name": "random.random", "line_number": 355, "usage_type": "call"}, {"api_name": "random.random", "line_number": 546, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 554, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 554, "usage_type": "attribute"}, {"api_name": "numpy.sum", "line_number": 555, "usage_type": "call"}, {"api_name": "localization_env.Headings", "line_number": 557, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 577, "usage_type": "call"}]} +{"seq_id": "316771380", "text": "from azure.cognitiveservices.vision.computervision import 
ComputerVisionClient\nfrom azure.cognitiveservices.vision.computervision.models import TextOperationStatusCodes\nfrom azure.cognitiveservices.vision.computervision.models import TextRecognitionMode\nfrom azure.cognitiveservices.vision.computervision.models import VisualFeatureTypes\nfrom msrest.authentication import CognitiveServicesCredentials\n\nfrom array import array\nimport os\nfrom PIL import Image\nimport sys\nimport time\nimport io\nimport json\nimport requests\nimport re\nimport cv2\nimport math\nimport numpy as np\nimport ImagePreprocess\nimport base64\nfrom Levenshtein import distance as Ldistance\nimport operator\n# from termcolor import colored\nfrom ExceptionsModule import *\n\n\ndef distance(p1, p2):\n    \"\"\"\n    calculate the distance (l2 norm) between two given 2d points\n    Arguments:\n        p1, p2 (points, (x,y)) - two points\n    Returns:\n        the distance\n    \"\"\"\n    xdis = (p1[0]-p2[0])**2\n    ydis = (p1[1]-p2[1])**2\n    return math.sqrt(xdis + ydis)\n\n\ndef fix_corners(current_corners, last_corners):\n    \"\"\"\n    takes a set of corners and replaces each of the last\n    corners with the closest one of the new corners (if we\n    have less than 4 new ones - completes the set to four\n    with the old ones)\n    Arguments:\n        current_corners - list/dictionary containing 0-4 points \n        representing the new corners found.\n        last_corners - list/dictionary containing 4 points\n        representing the last complete set of 4 corners\n    Returns:\n        the fixed set of corners ((4,2) list)\n    \"\"\"\n    fixed_corners = [[-1,-1], [-1,-1], [-1,-1], [-1,-1]]\n    for cp in current_corners:\n        min_dis = 500000000\n        curr_min = 0\n        for i in range(4):\n            dis = distance(last_corners[i], cp)\n            if dis < min_dis:\n                min_dis = dis\n                curr_min = i\n        fixed_corners[curr_min] = cp\n    for i in range(4):\n        if fixed_corners[i]==[-1,-1]:\n            fixed_corners[i] = last_corners[i]\n    return fixed_corners\n\n\ndef order_points(pts):\n    \"\"\"\n    re-arranges a set of given 4 points to the order\n    (tl, tr, br, bl)\n    Arguments:\n        pts - a (4,2) shaped list containing the points\n    Returns:\n        rect - the ordered pts set\n    \"\"\"\n    rect = np.zeros((4, 2), dtype = \"float32\")\n    num_pts = np.array(pts)\n    # the top-left point has the smallest x+y sum and the bottom-right the largest;\n    # the top-right has the smallest y-x difference and the bottom-left the largest\n    s = num_pts.sum(axis = 1)\n    rect[0] = num_pts[np.argmin(s)]\n    rect[2] = num_pts[np.argmax(s)]\n    diff = np.diff(num_pts, axis = 1)\n    rect[1] = num_pts[np.argmin(diff)]\n    rect[3] = num_pts[np.argmax(diff)]\n    return rect\n\n\ndef four_point_transform(image, pts):\n    \"\"\"\n    takes an image and a set of 4 corners - reshapes the\n    image based on the corners - trapezoid to rectangle\n    Arguments:\n        image - the image\n        pts - a (4,2) shaped list containing the points(corners)\n    Returns:\n        warped - the reshaped image\n    \"\"\"\n    # obtain a consistent order of the points and unpack them\n    # individually\n    rect = order_points(pts)\n    (tl, tr, br, bl) = rect\n    # compute the width of the new image, which will be the\n    # maximum distance between bottom-right and bottom-left\n    # x-coordinates or the top-right and top-left x-coordinates\n    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))\n    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))\n    maxWidth = max(int(widthA), int(widthB))\n    # compute the height of the new image, which will be the\n    # maximum distance between the top-right and bottom-right\n    # y-coordinates or the top-left and bottom-left y-coordinates\n    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))\n    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))\n    maxHeight = max(int(heightA), int(heightB))
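\n    # Editor's worked example (illustration only): for corners tl=(0,0), tr=(199,10),\n    # br=(201,110), bl=(2,98) this gives maxWidth=199 and maxHeight=100, so the skewed\n    # quadrilateral is mapped onto an upright 199x100 rectangle below.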
\n    # now that we have the dimensions of the new image, construct\n    # the set of destination points to obtain a \"birds eye view\",\n    # (i.e. top-down view) of the image, again specifying points\n    # in the top-left, top-right, bottom-right, and bottom-left\n    # order\n    dst = np.array([\n        [0, 0],\n        [maxWidth - 1, 0],\n        [maxWidth - 1, maxHeight - 1],\n        [0, maxHeight - 1]], dtype = \"float32\")\n    # compute the perspective transform matrix and then apply it\n    M = cv2.getPerspectiveTransform(rect, dst)\n    # copy = image.copy()\n    warped = cv2.warpPerspective(image, M, (maxWidth, maxHeight))\n    # return the warped image\n    return warped\n\n\ndef detect_markers(frame):\n    \"\"\"\n    detects the Aruco corner markers\n    Arguments:\n        frame - an image\n    Returns:\n        fixed_corners - a (4,2) list representing the middle of \n        the perceived corner markers\n    \"\"\"\n    '''cv2.imshow('frame',frame)\n    if cv2.waitKey(100) & 0xFF == ord('q'):\n        pass\n    time.sleep(10)'''\n    \n    '''frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    \n    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(64,64))\n    frame = clahe.apply(frame)'''\n    \n    dictionary = cv2.aruco.Dictionary_get(cv2.aruco.DICT_4X4_1000)\n    parameters = cv2.aruco.DetectorParameters_create()\n    # print('detecting')\n    fixed_corners = []\n    markerCorners, markerIds, rejectedCandidates = cv2.aruco.detectMarkers(frame, dictionary, parameters=parameters)\n    clone = frame.copy()\n    for mc in markerCorners: # top left, top right, bottom right and bottom left.\n        # cv2.rectangle(clone, (mc[0][3][0], mc[0][3][1]), (mc[0][1][0], mc[0][1][1]), (0, 255, 0), 2)\n        fixed_corners.append((np.mean([mc[0][0][0], mc[0][1][0], mc[0][2][0], mc[0][3][0]]),np.mean([mc[0][0][1], mc[0][1][1], mc[0][2][1], mc[0][3][1]])))\n        \n    #cv2.imshow(\"Window\", clone)\n    #cv2.waitKey(1)\n    #time.sleep(3)\n    return fixed_corners\n\n\ndef create_areas(area_dict, img):\n    \"\"\"\n    creates a set of formatted 'areas' - images with the\n    coordinates of their bottom left corner (in the\n    source image):\n    areas - [area's image, minimum height, minimum width]\n    Arguments:\n        area_dict - a dictionary containing the areas, its keys\n        represent the area names and the values are:\n        [hmin, hmax, wmin, wmax] in the source image\n        img - the source image\n    Returns:\n        areas - list of the newly formatted areas\n    \"\"\"\n    s = img.shape\n    height, width = s[0], s[1]\n    areas = []\n    for key, value in area_dict.items():\n        hmin, hmax, wmin, wmax = value\n        hmin *= height\n        hmax *= height\n        wmin *= width\n        wmax *= width\n        new_area = [img[math.ceil(hmin):math.ceil(hmax), math.ceil(wmin):math.ceil(wmax)], hmin, wmin]\n        areas.append(new_area)\n    return areas\n\ndef create_areas_black(area_dict, img): \n    \"\"\" takes the frame's areas-of-interest, and the frame itself, and blackens the irrelevant parts of the frame\n\n    Arguments:\n        area_dict -- dict of areas lists (in percentage)\n        img -- frame\n\n    Returns:\n        areas -- list with the original blackened frame\n    \"\"\"\n    # blocks the irrelevant area and returns the original image with the blocked part\n    s = img.shape\n    height, width = s[0], s[1]\n    areas = []\n    max_x_val = 0\n    # initialize all four rectangle coordinates so the cv2.rectangle call below is\n    # defined even when only one of the branches runs (editor's fix for a NameError)\n    black_window_x_left, black_window_x_right = 0, 0\n    black_window_y_up, black_window_y_down = 0, 0\n    for key, value in area_dict.items():\n        hmin, hmax, wmin, wmax = value\n        if hmin == 0 and hmax == 1: # vertical window\n            if wmin == 0:\n                black_window_x_left = math.ceil(wmax * width) \n                black_window_x_right = 1\n            else:\n                black_window_x_left = 0\n                black_window_x_right = math.ceil(wmin * width)\n        else: # horizontal window\n            black_window_y_up = 0\n            black_window_y_down = math.ceil(hmin * height)\n\n    original_picture_with_black_irelevant_window = cv2.rectangle(img, 
(black_window_x_left, black_window_y_up), (black_window_x_right, black_window_y_down), (0, 255, 0), -1) # block the irrelevant area\n        single_area = [original_picture_with_black_irelevant_window, 0, 0]\n        areas.append(single_area)\n    return areas\n\n\ndef transform_coords(coords, area):\n    \"\"\"\n    transform coordinates from an area to coords in the \n    source image\n    Arguments:\n        coords - coordinates in the area image\n        area - area from the list created by create_areas\n    Returns:\n        fixed_coords - the coordinates in the source image\n    \"\"\"\n    fixed_coords = []\n    for j in range(8):\n        if j%2==0:\n            fixed_coords.append(coords[j] + area[2])\n        else:\n            fixed_coords.append(coords[j] + area[1])\n    return fixed_coords\n    \ndef transform_boundries(boundry_dict):\n    # widen each field boundary by 5 pixels and flatten it to [xmin, xmax, ymin, ymax]\n    fixed_dict = {}\n    for key, value in boundry_dict.items():\n        fixed_value = [value[0][0]-5, value[1][0]+5, value[0][1]-5, value[1][1]+5]\n        fixed_dict[key] = fixed_value\n    return fixed_dict\n    \n\ndef create_bounded_output(readings, boundings, boundries, method = 3):\n    output_dict = {}\n    for key in boundries.keys():\n        for i in range(len(readings)):\n            if method == 1 : # area contain\n                if check_boundry(boundings[i], boundries[key]): # check if the temp rect is inside the bigger rect\n                    output_dict[key] = readings[i]\n            elif method == 2: # area intersection\n                if check_overlap(boundings[i], boundries[key]): # using percentage of intersection, greater than 0.7 is true!\n                    output_dict[key] = readings[i]\n            elif method == 3: # dot and contain\n                if check_dot(boundings[i], boundries[key]): # rectangle containing center point\n                    output_dict[key] = readings[i]\n        if key not in output_dict.keys():\n            output_dict[key] = \"N/A\"\n            # output_dict[key] = None\n    return output_dict\n\n\"\"\"\ndef create_bounded_output(readings, boundings, boundries):\n    output_dict = {}\n    for key in boundries.keys():\n        for i in range(len(readings)):\n            if check_boundry(boundings[i], boundries[key]):\n                output_dict[key] = readings[i]\n        if key not in output_dict.keys():\n            output_dict[key] = \"N/A\"\n    return output_dict\n\"\"\"\n\n\ndef check_overlap(temp_bounding, hard_bounding):\n    \"\"\"\n    check whether the given bounding overlaps with the \n    boundary\n    Arguments:\n        temp_bounding - coordinates (8, representing 4 corners)\n        of the current reading\n        hard_bounding - coordinates representing the boundaries \n        of the selected field\n    Returns:\n        output - binary - true means the bounding overlaps with\n        the boundary\n    \"\"\"\n\n    a = [hard_bounding[0][0],hard_bounding[0][1],hard_bounding[1][0],hard_bounding[1][1]]\n    b = [temp_bounding[0],temp_bounding[1],temp_bounding[4],temp_bounding[5]]\n    total_area = (a[2] - a[0]) * (a[3] - a[1])\n    dx = min(a[2], b[2]) - max(a[0], b[0])\n    dy = min(a[3], b[3]) - max(a[1], b[1])\n    if (dx>=0) and (dy>=0):\n        if float((dx * dy) / total_area) > 0.7:\n            return True\n    return False\n\n    \ndef check_dot(temp_bounding, hard_bounding):\n    \"\"\"\n    check whether the given bounding contains the center of \n    the boundary\n    Arguments:\n        temp_bounding - coordinates (8, representing 4 corners)\n        of the current reading\n        hard_bounding - coordinates representing the boundaries \n        of the selected field\n    Returns:\n        output - binary - true means the bounding contains the \n        boundary's center point\n    \"\"\"\n\n    # center_dot = (hard_bounding[0][0] + (hard_bounding[1][0] - hard_bounding[0][0])/ 2 , hard_bounding[0][1] + (hard_bounding[1][1] - hard_bounding[0][1])/ 2)\n    center_dot = (hard_bounding[0] + (hard_bounding[1] - hard_bounding[0])/ 2 , hard_bounding[2] + (hard_bounding[3] - hard_bounding[2])/ 2)
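\n    # Editor's example (illustration only): with hard_bounding = [10, 50, 20, 40]\n    # (xmin, xmax, ymin, ymax) the center is (30, 30); the reading is kept only when\n    # its bounding box spans that point.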
\n    if center_dot[0] >= temp_bounding[0] and center_dot[0] <= temp_bounding[4] and center_dot[1] >= temp_bounding[1] and center_dot[1] <= temp_bounding[5]:\n        return True\n    return False\n\n\ndef check_boundry(bounding, boundry):\n    \"\"\"\n    check whether the given bounding is within the given \n    boundary\n    Arguments:\n        bounding - coordinates (8, representing 4 corners) of \n        the current reading\n        boundry - coordinates representing the boundaries of the\n        selected field\n    Returns:\n        output - binary - true means the bounding is within the \n        boundary\n    \"\"\"\n    output = bounding[0]>=boundry[0]\n    output = output and (bounding[6]>=boundry[0])\n    output = output and (bounding[2]<=boundry[1])\n    output = output and (bounding[4]<=boundry[1])\n    output = output and (bounding[1]>=boundry[2])\n    output = output and (bounding[3]>=boundry[2])\n    output = output and (bounding[5]<=boundry[3])\n    output = output and (bounding[7]<=boundry[3])\n    return output \n\n\ndef fix_string(s):\n    # irrelevant function, not in use\n    json_string_fin = \"\"\n    last_c=\"\"\n    for c in s:\n        if c!=\"\\'\":\n            json_string_fin += c\n            if c==\"{\":\n                if last_c==\"\\'\":\n                    json_string_fin = last_string + \"\\'{\"\n                else:\n                    last_string = json_string_fin\n            if last_c!=\"}\":\n                json_string_fin += \"\\\"\"\n            else:\n                json_string_fin += \"\\'\"\n        last_c = c\n    return json_string_fin\n\n\ndef sockets_output_former(ocr_res, mon_id, medical_warning, mode_warning, results_warning):\n    \"\"\" forms the final output json to be sent to the UI via socket, with respect to the selected data format\n\n    Arguments:\n        ocr_res {dict} -- OCR results\n        mon_id {string} -- device ID to be updated\n        medical_warning {string} -- warning displayed on the respirator (if one exists)\n        mode_warning {bool} -- is the respirator in the wrong mode?\n        results_warning {bool} -- are 50% or more of the results N/A?\n\n    Returns:\n        output {json} -- the json to be sent\n    \"\"\"\n    json_dict = {}\n    json_dict[\"JsonData\"] = ocr_res\n    json_dict[\"DeviceID\"] = mon_id\n    json_dict[\"medicalWarning\"] = medical_warning\n    json_dict[\"modeWarning\"] = mode_warning\n    json_dict[\"resultsWarning\"] = results_warning\n    json_dict[\"error\"] = None\n    json_dict[\"deviceType\"] = os.getenv(\"DEVICE_TYPE\")\n    json_dict[\"gilayon_num\"] = os.getenv(\"GILAYON_NUM\")\n    output = json.dumps(json_dict)\n    print(\"OCR final output: \\n\", output)\n    return output\n\n\ndef get_digits(img, computervision_client, mode=\"digits\"):\n    \"\"\"\n    reads the textual information from an image (monitor)\n    Sends the frame to the MS OCR cloud cognitive service\n    Arguments:\n        img - the source image\n        computervision_client - the client\n        mode - \"digits\"/\"modes\" - reads device measures or device text (such as warnings and modes)\n    Returns:\n        results - the ocr results - a list containing pairs of \n        the [result, result bounding]\n    \"\"\"\n\n    # encodedFrame = cv2.imencode(\".jpg\", img)[1].tostring()\n    # tmp_frame = cv2.imdecode(np.frombuffer(img, np.uint8), -1)\n    # cv2.imshow(\"image\", tmp_frame)\n    # cv2.waitKey(0)\n    try:\n        recognize_printed_results = computervision_client.batch_read_file_in_stream(io.BytesIO(img), raw = True)\n        # Reading OCR results\n        operation_location_remote = recognize_printed_results.headers[\"Operation-Location\"]\n        operation_id = operation_location_remote.split(\"/\")[-1]\n    except Exception as e:\n        print(\"MSOCR Cognitive Service Exception! 
\\n\", e)\n raise MSOCRServiceVAOCVError\n while True:\n get_printed_text_results = computervision_client.get_read_operation_result(operation_id)\n if get_printed_text_results.status not in ['NotStarted', 'Running']:\n break\n time.sleep(0.1)\n \n tmp_frame = cv2.imdecode(np.frombuffer(img, np.uint8), -1)\n results = []\n text_flag = False\n show_frame_flag = False\n if get_printed_text_results.status == TextOperationStatusCodes.succeeded:\n for text_result in get_printed_text_results.recognition_results:\n for line in text_result.lines:\n # print(line.text, line.bounding_box)\n if mode == \"digits\":\n line.text = line.text.replace(\"O\", \"0\")\n line.text = line.text.replace(\"o\", \"0\")\n s = re.sub('[^0123456789./:]', '', line.text)\n if s != \"\":\n if s[0] == \".\":\n s = s[1:]\n s = s.rstrip(\".\")\n text_flag = True\n cv2.rectangle(tmp_frame, (int(line.bounding_box[0]), int(line.bounding_box[1])), (int(line.bounding_box[4]), int(line.bounding_box[5])), (255,0,0), 2)\n cv2.putText(tmp_frame,s,(int(line.bounding_box[0])-5, int(line.bounding_box[1])-5),cv2.FONT_HERSHEY_COMPLEX,0.3,(0,0,0),1)\n results.append((s, line.bounding_box))\n else:\n continue\n if mode == \"modes\":\n # s = re.sub('[^0123456789./:]', '', line.text)\n s = line.text\n if s != \"\":\n if s[0] == \".\":\n s = s[1:]\n s = s.rstrip(\".\")\n text_flag = True\n cv2.rectangle(tmp_frame, (int(line.bounding_box[0]), int(line.bounding_box[1])), (int(line.bounding_box[4]), int(line.bounding_box[5])), (255,0,0), 2)\n cv2.putText(tmp_frame,s,(int(line.bounding_box[0])-5, int(line.bounding_box[1])-5),cv2.FONT_HERSHEY_COMPLEX,0.3,(0,0,0),1)\n results.append((s, line.bounding_box))\n else:\n continue\n if text_flag and show_frame_flag:\n cv2.imshow(\"image\", tmp_frame)\n cv2.waitKey(0)\n return(results)\n\n\ndef get_ala_digits(img):\n # tmp_frame = cv2.imdecode(np.frombuffer(img, np.uint8), -1)\n # cv2.imshow(\"image\", tmp_frame)\n # cv2.waitKey(0)\n enc_img = base64.b64encode(img)\n data = {\"image\": str(enc_img, 'utf-8')}\n # data['screenCorners'] = {\"left-top\": [1, 1], \"right-top\": [600, 1], \"bottom-left\": [1, 550], \"bottom-right\": [600, 550]}\n res = requests.post(\"http://127.0.0.1:8088/v1/run_ocr\", json=data)\n res_str = json.loads(res.text)\n results_dicts = [{\"text\": x[\"value\"], \"coords\": [x[\"left\"], x[\"top\"], x[\"right\"], x[\"top\"], x[\"right\"], x[\"bottom\"], x[\"left\"], x[\"bottom\"]]} for x in res_str]\n # print(results_dicts)\n filtered_results = []\n for item in results_dicts:\n s = re.sub('[^0123456789./:]', '', item['text'])\n if s != \"\":\n if s[0] == \".\":\n s = s[1:]\n s = s.rstrip(\".\")\n filtered_results.append((s, item['coords']))\n else:\n continue\n # print(\"--------------------\")\n # print(filtered_results)\n return(filtered_results)\n\ndef get_intel_digits(img, mode):\n \"\"\"preform OCR on selected frame, parse and returns the values.\n Works with Intel's Text Spotting model.\n\n Arguments:\n img {image} -- frame\n mode {string} -- \"digits\"/\"text\" - reading measures digits or text (such as warning etc.)\n \"\"\"\n # tmp_frame = cv2.imdecode(np.frombuffer(img, np.uint8), -1)\n # cv2.imshow(\"image\", tmp_frame)\n # cv2.waitKey(0)\n enc_img = base64.b64encode(img)\n data = {\"image\": str(enc_img, 'utf-8')}\n INTEL_OCR_ENDPOINT = os.getenv(\"INTEL_OCR_ENDPOINT\")\n # res = requests.post(\"http://127.0.0.1:8088/run_ocr\", json=data)\n res = requests.post(INTEL_OCR_ENDPOINT, json=data)\n res_str = json.loads(res.text)\n results_dicts = [{\"text\": x[\"text\"], 
\"coords\": [x[\"coords\"][\"left\"], x[\"coords\"][\"top\"], x[\"coords\"][\"right\"], x[\"coords\"][\"top\"], x[\"coords\"][\"right\"], x[\"coords\"][\"bottom\"], x[\"coords\"][\"left\"], x[\"coords\"][\"bottom\"]]} for x in res_str]\n # print(results_dicts)\n filtered_results = []\n for item in results_dicts:\n if mode == \"digits\":\n s = re.sub('[^0123456789./:]', '', item['text'])\n if s != \"\":\n if s[0] == \".\":\n s = s[1:]\n s = s.rstrip(\".\")\n filtered_results.append((s, item['coords']))\n else:\n continue\n elif mode == \"text\":\n s = item['text']\n if s != \"\":\n filtered_results.append((s, item['coords']))\n else:\n continue\n # print(filtered_results)\n return(filtered_results)\n\n\ndef boundries_to_areas(boundries, hight, width):\n areas = {}\n i = 0\n for k,v in boundries.items():\n x1, y1, x2, y2 = v[0][0], v[0][1], v[1][0], v[1][1]\n top, bottom, left, right = y1/hight, y2/hight, x1/width, x2/width\n areas[i] = [top, bottom, left, right]\n i = i + 1\n return areas\n\n\ndef getVelaModeAndWarning(img, marker_corners, computervision_client):\n \"\"\" If device is a Vela respirator - read its warnings and selected mode.\n Works with MSOCR and with INTEL ocr, dependes on the env var.\n\n Arguments:\n img {image} -- raw video frame\n marker_corners {list} -- ARuco markers locations detected on the device\n computervision_client {MS cv client} -- client object to send MS OCR the frame\n\n Returns:\n found_mode, found_warning - {string}, {string} -- found mode, found warning\n \"\"\"\n s = img.shape\n height, width = s[0], s[1]\n two_top_markers = sorted(marker_corners, key=lambda tup: tup[1])[:2] # get 2 top markers\n left_corner, right_corner = sorted(two_top_markers, key=lambda tup: tup[0])[0] , sorted(two_top_markers, key=lambda tup: tup[0])[1] #get left and right\n \n x_range = [left_corner[0] / width , right_corner[0] / width ] # x range in form of [left_x, right_x]\n y_range = [0, (max(left_corner[1],right_corner[1]) / height) + 0.1] # y range in form of [0, y_top_corners + 10% of frame height\n experiment_area = {\"mode_warning\": [y_range[0], y_range[1], x_range[0], x_range[1]]}\n # print('experiment_area:', experiment_area)\n \n #top_area = {\"mode\": [0, 0.35, 0, 0.4], \"warning\": [0, 0.35, 0.55, 1]}\n top_area = experiment_area \n areas = create_areas(top_area, img)\n readings = {}\n boundings = {}\n i = 0\n CV_MODEL = os.getenv(\"CV_MODEL\")\n for area in areas:\n if CV_MODEL == \"MSOCR\":\n results = get_digits(cv2.imencode(\".jpg\", area[0])[1], computervision_client, \"modes\")\n elif CV_MODEL == \"INTEL\":\n results = get_intel_digits(cv2.imencode(\".jpg\", area[0])[1],\"text\")\n # results = get_ala_digits(cv2.imencode(\".jpg\", area[0])[1])\n for item in results:\n readings[i] = item[0]\n boundings[i] = transform_coords(item[1], area)\n i = i + 1\n strings_found = [v.lower().replace(\" \", \"\") for v in readings.values()]\n # print(strings_found)\n\n # check mode:\n known_modes = [\"volumesimv\",\"prvca/c\"]\n if CV_MODEL == \"INTEL\":\n known_modes = [\"volumesimv\",\"prvca1c\"]\n min_mode_distance = 99\n found_mode = \"UNKNOWN MODE\"\n for item in strings_found:\n for mode in known_modes:\n tmp_mode_dis = Ldistance(item, mode)\n # print(item, mode, tmp_mode_dis)\n if tmp_mode_dis < min_mode_distance and tmp_mode_dis <= len(mode)/2:\n min_mode_distance = tmp_mode_dis\n found_mode = mode\n # print(\"update\")\n # print(found_mode, min_mode_distance)\n\n # check warning:\n known_warnings = [\"highpip\", \"lowpip\", \"circuitdisconnect\", \"lowve\", 
\"apneainterval\", \"o2inletlow\", \"checkfilter\"]\n min_warning_distance = 99\n found_warning = \"no warning\"\n for item in strings_found:\n for warning in known_warnings:\n tmp_warning_dis = Ldistance(item, warning)\n # print(item, warning, tmp_warning_dis)\n if tmp_warning_dis < min_warning_distance and tmp_warning_dis <= len(warning)/2:\n min_warning_distance = tmp_warning_dis\n found_warning = warning\n # print(\"update\")\n # print(found_warning, min_warning_distance)\n return found_mode, found_warning\n\n\ndef fix_readings(readings_dic):\n \"\"\"\n fix faulty readings, based on basic measures-based logic and domain knowledge\n Arguments:\n readings_dic - a dictionary containing the results of \n the ocr\n Returns:\n readings_dic - the dictionary containing the fixed \n results of the ocr\n \"\"\"\n\n CV_MODEL = os.getenv(\"CV_MODEL\")\n for name, read in readings_dic.items(): \n read, rlen = str(read), len(read)\n if read == \"N/A\":\n continue\n if name == 'IBP' or name == 'NIBP':\n if CV_MODEL == \"MSOCR\":\n if rlen >= 6: # XXX/XXX or XXX/XX\n if read[3] in [\"7\", \"1\", \"4\"] : # mistake: 120780 -> 120/80, 1407110 -> 140/110 \n readings_dic[name] = read[:3] + '/' + read[4:]\n elif rlen == 5: # XX/XX\n if read[2] in [\"7\", \"1\", \"4\"] : # mistake: 90760 -> 90/60 \n readings_dic[name] = read[:2] + '/' + read[3:]\n else:\n readings_dic[name] = \"N/A\"\n elif CV_MODEL == \"INTEL\":\n if rlen == 4: # XXXX\n readings_dic[name] = read[:2] + '/' + read[2:]\n elif rlen == 5: # XX/XX or XXXXX\n if 100 <= int(read[:3]) <300: # XXXXX\n readings_dic[name] = read[:3] + '/' + read[3:]\n else: # XX/XX : 90760 -> 90/60 \n readings_dic[name] = read[:2] + '/' + read[3:]\n elif rlen >= 6: # XXX/XXX or XXX/XX --- TODO: !!in case \"/\" is missing - can't tell between xxx/xx and xxxxxx!!\n if read[3] in [\"7\", \"1\", \"4\"] : # mistake: 120780 -> 120/80, 1407110 -> 140/110 \n readings_dic[name] = read[:3] + '/' + read[4:]\n\n elif name == 'HR': \n continue\n elif name == \"RR\":\n if rlen > 2:\n readings_dic[name] = read[rlen-2] + read[rlen-1]\n elif name == 'etCO2':\n if rlen > 2:\n if read.endswith(\"100\"):\n readings_dic[name] = \"100\"\n else:\n readings_dic[name] = read[rlen-2] + read[rlen-1]\n elif name == 'SpO2':\n if int(read) > 100: # TODO: left to decide whether to check situations with single-digit sat level\n if rlen == 3:\n readings_dic[name] = read[1] + read[2]\n elif read[rlen-1] == \"0\" and read[rlen-2] == \"0\":\n readings_dic[name] = \"100\"\n else:\n readings_dic[name] = read[rlen-2] + read[rlen-1]\n elif rlen < 2:\n readings_dic[name] = \"N/A\"\n elif name == 'Temp': \n if rlen >= 3:\n if read[2] not in [',','.']: # XXX \n readings_dic[name] = read[:2] + '.' + read[2:] # mistake: 307 -> 30.7\n elif name == 'IE': \n if rlen >= 2:\n if read[0] == 1 and read[-1] == 1:\n continue #we dont know what to do\n if read[0] == '1': # numbers in format 1:##\n if read[1] != ':':\n read = read[:1] + ':' + read[1:] # mistake: 133 -> 1:33\n if read[-2] != '.':\n read = read[:-1] + '.' + read[-1:] # mistake: 1:33 -> 1:3.3\n readings_dic[name] = read\n elif read[-1] == '1': # numbers in format ##:1\n if read[-2] != ':':\n read = read[:-1] + ':' + read[-1:] # mistake: 331 -> 33:1\n if read[1] != '.':\n read = read[:1] + '.' 
\n                    if read[1] != '.':\n                        read = read[:1] + '.' + read[1:] # mistake: 33:1 -> 3.3:1\n                    readings_dic[name] = read\n    return readings_dic\n    \ndef isNumber(x):\n    \"\"\"\n    check whether the input is a number\n    Arguments:\n        x - an input\n    Returns:\n        binary - true if the input is a number\n    \"\"\"\n    try:\n        return bool(0 == float(x)*0)\n    except (TypeError, ValueError):\n        return False\n\ndef create_hist_dict(result_list):\n    \"\"\"\n    create a histogram of the results \n    Arguments:\n        result_list - a list of dictionaries containing several \n        ocr results\n    Returns:\n        hist_dict - a dictionary of dictionaries. every \n        key(field) has a dictionary, and the field's \n        dictionary's keys are the possible results. its values \n        are the number of times they appeared in the \n        result_list.\n    \"\"\"\n    hist_dict = {}\n    for res in result_list:\n        for (key, val) in res.items():\n            if key in hist_dict.keys():\n                if val in hist_dict[key].keys():\n                    hist_dict[key][val] += 1\n                else:\n                    hist_dict[key][val] = 1\n            else:\n                new_dict = {}\n                new_dict[val] = 1\n                hist_dict[key] = new_dict\n    return hist_dict\n\ndef histogram_out(result_list, key, k):\n    \"\"\"\n    return from a list of results, for a specific key, the\n    top result that appeared at least k times (or None if \n    there isn't one)\n    Arguments:\n        result_list - a list of dictionaries containing several \n        ocr results\n        key - a specific key for the results (the name of a \n        field)\n        k - the minimum number of times the result appeared\n    Returns:\n        the top result that appeared at least k times\n    \"\"\"\n    hist_dict = create_hist_dict(result_list)\n    best = max(hist_dict[key].items(), key=operator.itemgetter(1))\n    if (best[1] >= k and best[0] != \"N/A\"): \n        return best[0]\n    else:\n        return None\n\ndef fix_output(output, results_list, k):\n    \"\"\"\n    fix the result's errors based on the last few results.\n    checks if the numeric result is similar (within 50% \n    of the last result), and fixes it if it's not and the \n    result before the last isn't different from the last (in \n    which case - keeps this one)\n    also fixes N/A's (missed readings) by returning the last \n    viable result\n    Arguments:\n        output - the current ocr result (a dictionary)\n        results_list - a list of dictionaries containing several \n        ocr results\n        k - the minimum number of times the result appeared \n    Returns:\n        new_output - the fixed ocr result\n    \"\"\"\n    new_output = output.copy()\n    for key, val in output.items():\n        new_val = val\n        # print(isNumber(val))\n        for i in range(len(results_list)):\n            last_val = \"N/A\"\n            if results_list[-1-i][key] != \"N/A\":\n                last_val = results_list[-1-i][key]\n                last_val_key = i\n                break\n        if val == \"N/A\":\n            new_val = last_val\n        else:\n            hist_out = histogram_out(results_list, key, k)\n            if hist_out:\n                new_val = hist_out\n            elif isNumber(val) and isNumber(last_val): \n                if abs(float(val)-float(last_val)) > 0.5 * float(val):\n                    new_val = last_val\n                    older_val = None\n                    for i in range(len(results_list)-last_val_key-1):\n                        if results_list[-2-last_val_key-i][key] != \"N/A\":\n                            older_val = results_list[-2-last_val_key-i][key]\n                            break\n                        older_val = \"N/A\"\n                    if isNumber(older_val):\n                        if abs(float(last_val)-float(older_val)) > 0.5 * float(last_val):\n                            new_val = val\n        new_output[key] = new_val\n    return new_output
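\n\n# Editor's illustration of the k-vote filter above: if the last five readings for 'HR'\n# are ['72', '72', '7', '72', 'N/A'] and k = 4, create_hist_dict gives\n# {'72': 3, '7': 1, 'N/A': 1}; histogram_out then returns None (3 < 4), so fix_output\n# falls back to the 50%-jump comparison against the last viable value.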
\n\n\ndef generic_errors(output, last_results):\n    \"\"\"\n    return some errors based on the output (amount of \n    missing fields, fields that are completely missing from \n    the last few results)\n    Arguments:\n        output - the current ocr result (a dictionary)\n        last_results - a list of dictionaries containing several \n        ocr results\n    Returns:\n        results_warning - binary T/F\n    \"\"\"\n    results_warning = None\n    miss_count = 0\n    for key, value in output.items():\n        if value == \"N/A\":\n            miss_count += 1\n    if miss_count > 0:\n        if miss_count >= 0.5*len(output):\n            results_warning = True\n            # if miss_count >= 0.75*len(output):\n            #     print(\"Fatal error, almost no data in format\")\n            # else:\n            #     print(\"Error, most data not in format\",\"red\")\n        else:\n            pass\n            # print(\"Mild error, some missing fields\",\"yellow\")\n    missing_dict = {}\n    amount_of_results = len(last_results)\n    for res in last_results:\n        for key, value in res.items():\n            if value is None:\n                if key in missing_dict.keys():\n                    missing_dict[key] += 1\n                else:\n                    missing_dict[key] = 1\n    completely_missing_vals = []\n    for key, value in missing_dict.items():\n        if value == amount_of_results:\n            completely_missing_vals.append(key)\n    if len(completely_missing_vals) > 0:\n        print(\"Error, \", completely_missing_vals, \"are missing from the last \", amount_of_results, \"frames!\")\n    return results_warning\n\n\ndef AnalyzeFrame(orig_frame, computervision_client, boundries, areas_of_interes, ocrsocket, last_four_corners, old_results_list):\n    frame = cv2.imdecode(np.frombuffer(orig_frame, np.uint8), -1)\n    orig_frame = cv2.imdecode(np.frombuffer(orig_frame, np.uint8), -1)\n    \n    # Find ARuco corners:\n    new_corners = detect_markers(frame)\n    old_corners = last_four_corners\n\n    # TODO: check for duplicated markers - whether one was detected twice in close locations\n    if len(new_corners) < 4:\n        fixed_corners = fix_corners(new_corners, old_corners)\n    elif len(new_corners) > 4:\n        # print(\"too many markers - keep the old ones\")\n        fixed_corners = old_corners\n    else:\n        old_corners = new_corners\n        fixed_corners = new_corners\n    frame = four_point_transform(frame, fixed_corners)\n    \n    mode_warning = None\n    medical_warning = None\n    device_type = os.getenv(\"DEVICE_TYPE\")\n    if device_type == \"respiration\":\n        found_mode, found_warning = getVelaModeAndWarning(orig_frame, fixed_corners, computervision_client)\n        if found_mode != \"volumesimv\":\n            mode_warning = True\n            # print(\"UNKNOWN MODE DETECTED!!\")\n            # TODO: try again and raise exception\n        if found_warning != \"no warning\":\n            medical_warning = found_warning\n            # print(\"RESPIRATION WARNING: \", found_warning)\n\n    # Pre-Process: TODO: Integrate Gidi's module\n    frame = ImagePreprocess.unsharp(frame)\n    # frame = ImagePreprocess.filter2d(frame)\n    \n    areas_dict = areas_of_interes\n    combine_areas_to_frame = True\n    if os.environ['CV_MODEL'] == 'INTEL' and combine_areas_to_frame:\n        try:\n            areas = create_areas_black(areas_dict, frame)\n        except Exception as e:\n            print('Error in black area:\\n', e)\n            raise e\n    else:\n        areas = create_areas(areas_dict, frame)\n    \n    # our output\n    readings = {}\n    boundings = {}\n    i = 0\n    CV_MODEL = os.getenv(\"CV_MODEL\")\n    for area in areas:\n        try:\n            if CV_MODEL == \"MSOCR\":\n                results = get_digits(cv2.imencode(\".jpg\", area[0])[1], computervision_client, \"digits\")\n            elif CV_MODEL == \"ALA\":\n                results = get_ala_digits(cv2.imencode(\".jpg\", area[0])[1])\n            elif CV_MODEL == \"INTEL\":\n                # TODO: align functionality with get_digits:\n                results = get_intel_digits(cv2.imencode(\".jpg\", area[0])[1], \"digits\")\n            else:\n                raise Exception(\"UNRECOGNIZED MODEL\")\n        except Exception as e:\n            print(\"Exception in get_digits: \\n\", e)\n            raise e\n        for item in results:\n            readings[i] = item[0]\n            boundings[i] = transform_coords(item[1], area)\n            i = i + 1\n    # print(\"Raw readings: \\n\", readings, \"\\n Boundings: \\n\", boundings)\n    print(\"Raw readings: \\n\", readings)
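\n    # method=3 below: a reading is assigned to a named field only when the field's\n    # center point falls inside the reading's OCR bounding box (see check_dot above)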
\n    output = create_bounded_output(readings, boundings, transform_boundries(boundries), 3)\n    print(\"OCR output (before changes): \\n\", output)\n    # IMPORTANT: when needed - comment out the next line and change get_boundries accordingly\n    # Fix readings based on the known measure formats:\n    output = fix_readings(output)\n    \n    k_frames_to_save = 5\n    if len(old_results_list) < k_frames_to_save:\n        old_results_list.append(output) # build a \"window\" of 5 frames\n        fixed_result = False\n        # return last_results # just append, less than 5 frames seen\n    else:\n        old_results_list.pop(0) # remove the oldest frame from the list\n        fixed_result = fix_output(output, old_results_list, k_frames_to_save-1) \n        old_results_list.append(output) # add our current result\n        output = fixed_result\n    \n    results_warning = None\n    results_warning = generic_errors(output, old_results_list)\n    # print(\"Results Warning: \", results_warning)\n\n    # print(output)\n    \n    monitor_id = os.getenv(\"DEVICE_ID\")\n    json_to_socket = sockets_output_former(output, monitor_id, medical_warning, mode_warning, results_warning)\n    for trail in range(4):\n        try:\n            ocrsocket.emit('data', json_to_socket)\n        except:\n            if trail == 3:\n                print(\"raising exception, no socket\")\n                raise OCRSocketVAOCVError(\"Can't emit OCR results to socket\")\n            else:\n                print(\"trying again\")\n                time.sleep(1)\n                continue\n        break\n    FRAME_DELAY = os.getenv(\"FRAME_DELAY\")\n    time.sleep(float(FRAME_DELAY))\n    return old_corners, old_results_list\n", "sub_path": "AnalyzeFrame.py", "file_name": "AnalyzeFrame.py", "file_ext": "py", "file_size_in_byte": 37488, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "math.sqrt", "line_number": 37, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 80, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 83, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 84, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 85, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 86, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 107, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 121, "usage_type": "call"}, {"api_name": "cv2.getPerspectiveTransform", "line_number": 127, "usage_type": "call"}, {"api_name": "cv2.warpPerspective", "line_number": 129, "usage_type": "call"}, {"api_name": "cv2.aruco.Dictionary_get", "line_number": 153, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 153, "usage_type": "attribute"}, {"api_name": "cv2.aruco.DetectorParameters_create", "line_number": 154, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 154, "usage_type": "attribute"}, {"api_name": "cv2.aruco.detectMarkers", "line_number": 157, "usage_type": "call"}, {"api_name": "cv2.aruco", "line_number": 157, "usage_type": "attribute"}, {"api_name": "numpy.mean", "line_number": 161, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 192, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 215, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 219, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 
222, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 224, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 397, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 398, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 399, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 422, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 433, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 435, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 435, "usage_type": "attribute"}, {"api_name": "azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes.succeeded", "line_number": 439, "usage_type": "attribute"}, {"api_name": "azure.cognitiveservices.vision.computervision.models.TextOperationStatusCodes", "line_number": 439, "usage_type": "name"}, {"api_name": "re.sub", "line_number": 446, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 452, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 453, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 453, "usage_type": "attribute"}, {"api_name": "cv2.rectangle", "line_number": 465, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 466, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_COMPLEX", "line_number": 466, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 471, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 472, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 480, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 483, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 484, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 489, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 512, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 514, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 516, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 517, "usage_type": "call"}, {"api_name": "re.sub", "line_number": 523, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 580, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 583, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 585, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 602, "usage_type": "call"}, {"api_name": "Levenshtein.distance", "line_number": 616, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 637, "usage_type": "call"}, {"api_name": "operator.itemgetter", "line_number": 763, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 864, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 864, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 864, "usage_type": "attribute"}, {"api_name": "cv2.imdecode", "line_number": 865, "usage_type": "call"}, {"api_name": "numpy.frombuffer", "line_number": 865, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 865, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 884, "usage_type": "call"}, {"api_name": "ImagePreprocess.unsharp", "line_number": 896, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 901, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 914, "usage_type": "call"}, {"api_name": 
"cv2.imencode", "line_number": 918, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 920, "usage_type": "call"}, {"api_name": "cv2.imencode", "line_number": 923, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 958, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 969, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 972, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 973, "usage_type": "call"}]} +{"seq_id": "628237317", "text": "import pygame\n\npygame.init()\n\nDISPLAY_WIDTH = 600\nDISPLAY_HEIGHT = 480\nWHITE = (255, 255, 255)\n\n# Load the car images.\nblue_car = pygame.image.load('images/bluecar.png')\nred_car = pygame.image.load('images/redcar.png')\n\n# As befor we need size info for the images.\nblue_car_width, blue_car_height = blue_car.get_rect().size\nred_car_width, red_car_height = red_car.get_rect().size\n\n# In this demo we place both cars side by side at the bottom of the screen\n# on each side of the \"middle line\". Lets have about 10 pixels of space between them\n# and have each one 4 pixels above the bottom of the screen.\nblue_car_x = (DISPLAY_WIDTH / 2) - (blue_car_width + 5)\nblue_car_y = (DISPLAY_HEIGHT - 4) - blue_car_height\n\n# because the left side of the car is on the right side of the \"Middleline\"\n# we dont't need to add the width, only half of the suggested space between the cars\nred_car_x = (DISPLAY_WIDTH / 2) + 5\n# identicle to the second line of the blue car since this sets the distance of the cars\n# to the bottom of the screen. The height of both cars needs to be taken into\n# consideration so that the cars won't be \"displayed\" off the screen.\nred_car_y = (DISPLAY_HEIGHT - 4) - red_car_height\n\nscreen = pygame.display.set_mode((DISPLAY_WIDTH, DISPLAY_HEIGHT))\npygame.display.set_caption('Image')\n\nrunning = True\n\nwhile running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n\n screen.fill(WHITE)\n # now we blit the two cars to the screen\n screen.blit(red_car, (red_car_x, red_car_y))\n screen.blit(blue_car, (blue_car_x, blue_car_y))\n\n pygame.display.update()\n\npygame.quit()", "sub_path": "Python/FORR2HF05CU/Lokaverkefni/Sýniverkefni/03_PyGame/03_third_img(1).py", "file_name": "03_third_img(1).py", "file_ext": "py", "file_size_in_byte": 1645, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pygame.init", "line_number": 3, "usage_type": "call"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 31, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 31, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 32, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 32, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 37, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 37, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 38, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 46, "usage_type": "attribute"}, {"api_name": 
"pygame.quit", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "320768881", "text": "# https://github.com/keithweaver/python-aws-s3\n\nimport boto3\nimport logging\n# from botocore.client import Config\n\nACCESS_KEY_ID = 'AKIA56JHVDHF24AYYC6Q'\nACCESS_SECRET_KEY = '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'\nBUCKET_NAME = 'raspi-smart-camera'\n\ndef test_upload(local_filename = \"/home/pi/raspi-smart-camera/img0.jpg\", s3_file_name = \"test_img.jpg\"):\n \n #note the s3 filename/path is set differently and has to be listed manually\n data = open(local_filename, 'rb')\n s3 = boto3.resource(\n 's3',\n aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=ACCESS_SECRET_KEY,\n # config=Config(signature_version='s3v4')\n )\n try:\n s3.Bucket(BUCKET_NAME).put_object(Key=\"upload_folder/\" + s3_file_name, Body=data)\n logging.info(\"Successfully uploaded file {} to S3 bucket {}/{}.\".format(local_filename, BUCKET_NAME, s3_file_name))\n\n except Exception as e:\n print(\"Error: could not upload file:\" + local_filename + \" to s3:\" + str(e))\n\n print (\"Upload Done\")\n\ndef test_download(s3_file_name = \"test_folder/test_img.jpg\", local_download_path = \"test_img.jpg\" ):\n # S3 Connect\n s3 = boto3.resource(\n 's3',\n aws_access_key_id=ACCESS_KEY_ID,\n aws_secret_access_key=ACCESS_SECRET_KEY,\n # config=Config(signature_version='s3v4')\n )\n\n s3_file_name = \"upload_folder/\" + str(image_str)\n local_download_path = \"images/\" + image_str #include the file name\n print(\"prebucket\")\n #include the file name\n\n # Image download\n s3.Bucket(BUCKET_NAME).download_file(s3_file_name, local_download_path); # Change the second part\n # This is where you want to download it too.\n # I believe the semicolon is there on purpose\n\n print (\"Download Done\")\n\n# I guess this doesn't really need to be a lambda?\n# 1. From Wheesh, on button press, send image using above logic (test_upload)\n# 2. Then, from Wheesh, invoke a lambda sitting in AWS that takes the sent filename\n# and runs some ML on the file and place it somewhere in an s3 bucket\n# 3. Wheesh makes a request for that file, as it knows the filename (test_download)\n# 4. In case it takes some time for the ML model to run, if Wheesh can't find the file, do a wait() and try again\n# 5. 
Finally, store the downloaded file somewhere and show/blit it\n\n\n# test_upload()\ntest_download(\"img0_edited.jpg\")\n# test_download()\n", "sub_path": "simple_image_commands.py", "file_name": "simple_image_commands.py", "file_ext": "py", "file_size_in_byte": 2380, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "boto3.resource", "line_number": 15, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 23, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 32, "usage_type": "call"}]} +{"seq_id": "536700820", "text": "import cv2\nimport os\nimport numpy as np\nimport yaml\nfrom paddle.inference import Config, create_predictor, PrecisionType\nfrom PIL import Image\n\nfrom .download import get_model_path\nfrom .preprocess import preprocess, Resize, NormalizeImage, Permute, PadStride, decode_image\nfrom .visualize import draw_det\n\nclass Detector(object):\n    def __init__(self, model_name):\n        parent_path = os.path.abspath(os.path.join(__file__, *(['..'] * 2)))\n        yml_file = os.path.join(parent_path, 'configs/{}.yml'.format(model_name))\n        with open(yml_file, 'r') as f:\n            yml_conf = yaml.safe_load(f)\n        \n        infer_model = get_model_path(yml_conf['model_path'])\n        infer_params = get_model_path(yml_conf['param_path'])\n        config = Config(infer_model, infer_params)\n        device = yml_conf.get('device', 'CPU')\n        run_mode = yml_conf.get('mode', 'paddle')\n        cpu_threads = yml_conf.get('cpu_threads', 1)\n        if device == 'CPU':\n            config.disable_gpu()\n            config.set_cpu_math_library_num_threads(cpu_threads)\n        elif device == 'GPU':\n            # initial GPU memory(M), device ID\n            config.enable_use_gpu(200, 0)\n            # optimize graph and fuse op\n            config.switch_ir_optim(True)\n\n        precision_map = {\n            'trt_int8': Config.Precision.Int8,\n            'trt_fp32': Config.Precision.Float32,\n            'trt_fp16': Config.Precision.Half\n        }\n\n        batch_size = yml_conf.get('batch_size', 1)  # batch_size is used below but was never defined; an optional 'batch_size' config key (default 1) is assumed\n        if run_mode in precision_map.keys():\n            config.enable_tensorrt_engine(\n                workspace_size=(1 << 25) * batch_size,\n                max_batch_size=batch_size,\n                min_subgraph_size=yml_conf['min_subgraph_size'],\n                precision_mode=precision_map[run_mode],\n                use_static=True,\n                use_calib_mode=False)\n\n            if yml_conf['use_dynamic_shape']:\n                min_input_shape = {\n                    'image': [batch_size, 3, 640, 640],\n                    'scale_factor': [batch_size, 2]\n                }\n                max_input_shape = {\n                    'image': [batch_size, 3, 1280, 1280],\n                    'scale_factor': [batch_size, 2]\n                }\n                opt_input_shape = {\n                    'image': [batch_size, 3, 1024, 1024],\n                    'scale_factor': [batch_size, 2]\n                }\n                config.set_trt_dynamic_shape_info(min_input_shape, max_input_shape,\n                                                  opt_input_shape)\n        \n        # disable print log when predict\n        config.disable_glog_info()\n        # enable shared memory\n        config.enable_memory_optim()\n        # disable feed, fetch OP, needed by zero_copy_run\n        config.switch_use_feed_fetch_ops(False)\n        self.predictor = create_predictor(config)\n        self.yml_conf = yml_conf\n        self.preprocess_ops = self.create_preprocess_ops(yml_conf)\n        self.input_names = self.predictor.get_input_names()\n        self.output_names = self.predictor.get_output_names()\n        self.draw_threshold = yml_conf.get('draw_threshold', 0.5)\n        self.class_names = yml_conf['label_list']\n\n    \n    def create_preprocess_ops(self, yml_conf):\n        preprocess_ops = []\n        for op_info in yml_conf['Preprocess']:\n            new_op_info = op_info.copy()\n            op_type = new_op_info.pop('type')\n            preprocess_ops.append(eval(op_type)(**new_op_info))\n        return preprocess_ops\n    \n    def create_inputs(self, image_files):\n        inputs = dict()\n        im_list, im_info_list = [], []\n        for im_path in 
image_files:\n im, im_info = preprocess(im_path, self.preprocess_ops)\n im_list.append(im)\n im_info_list.append(im_info)\n\n inputs['im_shape'] = np.stack([e['im_shape'] for e in im_info_list], axis=0).astype('float32')\n inputs['scale_factor'] = np.stack([e['scale_factor'] for e in im_info_list], axis=0).astype('float32')\n inputs['image'] = np.stack(im_list, axis=0).astype('float32')\n return inputs\n \n def __call__(self, image_file):\n inputs = self.create_inputs([image_file])\n for name in self.input_names:\n input_tensor = self.predictor.get_input_handle(name)\n input_tensor.copy_from_cpu(inputs[name])\n \n self.predictor.run()\n boxes_tensor = self.predictor.get_output_handle(self.output_names[0])\n np_boxes = boxes_tensor.copy_to_cpu()\n boxes_num = self.predictor.get_output_handle(self.output_names[1])\n np_boxes_num = boxes_num.copy_to_cpu()\n if np_boxes_num.sum() <= 0:\n np_boxes = np.zeros([0, 6])\n \n if isinstance(image_file, str):\n image = Image.open(image_file).convert('RGB')\n elif isinstance(image_file, np.ndarray):\n image = image_file\n expect_boxes = (np_boxes[:, 1] > self.draw_threshold) & (np_boxes[:, 0] > -1)\n np_boxes = np_boxes[expect_boxes, :]\n image = draw_det(image, np_boxes, self.class_names)\n return image, {'bboxes': np_boxes.tolist()}", "sub_path": "modelcenter/PP-PicoDet/APP/src/detection.py", "file_name": "detection.py", "file_ext": "py", "file_size_in_byte": 5096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 17, "usage_type": "call"}, {"api_name": "download.get_model_path", "line_number": 19, "usage_type": "call"}, {"api_name": "download.get_model_path", "line_number": 20, "usage_type": "call"}, {"api_name": "paddle.inference.Config", "line_number": 21, "usage_type": "call"}, {"api_name": "paddle.inference.Config.Precision", "line_number": 35, "usage_type": "attribute"}, {"api_name": "paddle.inference.Config", "line_number": 35, "usage_type": "name"}, {"api_name": "paddle.inference.Config.Precision", "line_number": 36, "usage_type": "attribute"}, {"api_name": "paddle.inference.Config", "line_number": 36, "usage_type": "name"}, {"api_name": "paddle.inference.Config.Precision", "line_number": 37, "usage_type": "attribute"}, {"api_name": "paddle.inference.Config", "line_number": 37, "usage_type": "name"}, {"api_name": "paddle.inference.create_predictor", "line_number": 72, "usage_type": "call"}, {"api_name": "preprocess.preprocess", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 98, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 114, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 117, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 117, "usage_type": "name"}, {"api_name": "numpy.ndarray", "line_number": 118, "usage_type": "attribute"}, {"api_name": "visualize.draw_det", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "95292999", "text": "from arguments 
import get_args\nfrom ppo_agent import ppo_agent\nimport numpy as np\nfrom baselines.common.cmd_util import make_mujoco_env\nfrom models import MLP_Net\nimport torch\nimport os\nimport gym\nimport cv2\n\n# get tensors\ndef get_tensors(obs):\n return torch.tensor(obs, dtype=torch.float32).unsqueeze(0)\n\n# denormalize\ndef denormalize(x, mean, std, clip=10):\n x -= mean\n x /= (std + 1e-8)\n return np.clip(x, -clip, clip)\n\nif __name__ == '__main__':\n args = get_args()\n # create environment\n env = gym.make(args.env_name)\n # start to create model\n model_path = args.save_dir + args.env_name + '/model.pt'\n network = MLP_Net(env.observation_space.shape[0], env.action_space.shape[0], args.dist)\n network_model, filters = torch.load(model_path, map_location=lambda storage, loc: storage)\n network.load_state_dict(network_model)\n obs = denormalize(env.reset(), filters.rs.mean, filters.rs.std)\n reward_total = 0\n for _ in range(10000):\n env.render()\n obs_tensor = get_tensors(obs)\n with torch.no_grad():\n _, pi = network(obs_tensor)\n # select actions\n if args.dist == 'gauss':\n mean, std = pi\n actions = mean.detach().cpu().numpy().squeeze()\n elif args.dist == 'beta':\n alpha, beta = pi\n actions = (alpha - 1) / (alpha + beta - 2)\n actions = actions.detach().cpu().numpy().squeeze()\n actions = -1 + 2 * actions \n obs, reward, done, _ = env.step(actions)\n reward_total += reward\n if done:\n break\n obs = denormalize(obs, filters.rs.mean, filters.rs.std)\n print('the total reward in this episode is {}'.format(reward_total))\n", "sub_path": "07-proximal-policy-optimization/demo_mujoco.py", "file_name": "demo_mujoco.py", "file_ext": "py", "file_size_in_byte": 1725, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.tensor", "line_number": 13, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 13, "usage_type": "attribute"}, {"api_name": "numpy.clip", "line_number": 19, "usage_type": "call"}, {"api_name": "arguments.get_args", "line_number": 22, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 24, "usage_type": "call"}, {"api_name": "models.MLP_Net", "line_number": 27, "usage_type": "call"}, {"api_name": "torch.load", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "423540255", "text": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 24 22:18:45 2020\n\n@author: louisgiron\n\"\"\"\nfrom tqdm import tqdm\nimport datetime\nimport logging\n\nfrom assets.models import RecommendedAssetsToBuy\nfrom code.config.utils import get_code\nfrom code.config.global_variables import URLS_RECOMMENDED_ASSETS\n\nlogging.basicConfig(level=logging.INFO)\n\n\ndef extract_html_code_from_one_page(html_code):\n logging.info('Assets - Scrap one page: extract all html code from page')\n html_code_extracted = html_code('tbody')[0]\n return html_code_extracted\n\n\ndef clean_text(text):\n new_text = text.replace(u'\\n', '')\n new_text = new_text.replace('EUR', '')\n new_text = new_text.replace('+', '')\n new_text = new_text.replace('-', '')\n new_text = new_text.split('(')[0]\n new_text = new_text.strip()\n return new_text\n\n\ndef convert_text_to_float(text):\n text_cleaned = clean_text(text)\n try:\n if text_cleaned != '':\n return float(text_cleaned)\n else:\n return None\n except Exception as e:\n logging.info(f'Recommended assets - Scrap one page: convert text to float: {e}')\n if '%' in 
text_cleaned:\n text_cleaned = text_cleaned.replace('%', '')\n return float(text_cleaned) / 100\n elif text_cleaned == 'Atteint':\n return float(1)\n else:\n return clean_text(text)\n\n\ndef convert_text_to_int(text):\n try:\n return int(clean_text(text))\n except Exception as e:\n logging.info(f'Recommended assets - Scrap one page: convert text to int: {e}')\n return clean_text(text)\n\n\ndef get_info_from_table_row(extracted_table_row):\n name = extracted_table_row.a.get_text()\n url = 'https://www.boursorama.com' + extracted_table_row.a['href']\n values_row = extracted_table_row('td')\n action = values_row[1].get_text()\n value = convert_text_to_float(values_row[2].get_text())\n value_objective = convert_text_to_float(values_row[3].get_text())\n potential_percent = convert_text_to_float(values_row[4].get_text())\n nb_analysts = convert_text_to_int(values_row[5].get_text())\n net_benefit_in_eur = convert_text_to_float(values_row[6].get_text())\n rentability_n_percent = convert_text_to_float(values_row[7].get_text())\n price_earning_ratio_n = convert_text_to_float(values_row[8].get_text())\n price_earning_ratio_n_plus_1 = convert_text_to_float(values_row[9].get_text())\n\n id_asset = get_id_asset_info(url=url)\n\n return (id_asset, name, url, action, value, value_objective, potential_percent, nb_analysts, net_benefit_in_eur,\n rentability_n_percent, price_earning_ratio_n, price_earning_ratio_n_plus_1)\n\n\ndef get_id_asset_info(url):\n html_code = get_code(url)\n id_asset_div = html_code('div', 'c-faceplate__body')\n try:\n return id_asset_div[0]('h2', 'c-faceplate__isin')[0].get_text().strip().split()[0]\n except Exception as e:\n logging.info(f'Recommended assets - Scrap one page: convert text to int: {e}')\n\n\ndef insert_table_row_in_db(extracted_table_row):\n (id_asset, name, url, action, value, value_objective, potential_percent, nb_analysts, net_benefit_in_eur,\n rentability_n_percent, price_earning_ratio_n, price_earning_ratio_n_plus_1) = get_info_from_table_row(\n extracted_table_row)\n\n response = RecommendedAssetsToBuy.objects.update_or_create(\n id_asset=id_asset,\n date_date=datetime.date.today(),\n defaults={'date': datetime.datetime.today(),\n 'name': name,\n 'url': url,\n 'action': action,\n 'value': value,\n 'value_objective': value_objective,\n 'potential_percent': potential_percent,\n 'nb_analysts': nb_analysts,\n 'net_benefit_in_eur': net_benefit_in_eur,\n 'rentability_n_percent': rentability_n_percent,\n 'price_earning_ratio_n': price_earning_ratio_n,\n 'price_earning_ratio_n_plus_1': price_earning_ratio_n_plus_1\n }\n )\n logging.info(f'Recommended assets - ingested {name} in db: {response}')\n\n\ndef insert_table_in_db(table):\n for row in table:\n insert_table_row_in_db(extracted_table_row=row)\n\n\ndef get_code_and_insert_in_db():\n for url in tqdm(URLS_RECOMMENDED_ASSETS):\n html_code = get_code(url=url)\n table = html_code('tbody')[0]\n insert_table_in_db(table=table)\n", "sub_path": "code/assets/get_recommended_assets_by_analyts.py", "file_name": "get_recommended_assets_by_analyts.py", "file_ext": "py", "file_size_in_byte": 4482, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.basicConfig", "line_number": 16, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 16, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 20, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 43, "usage_type": "call"}, {"api_name": "logging.info", 
"line_number": 57, "usage_type": "call"}, {"api_name": "code.config.utils.get_code", "line_number": 82, "usage_type": "call"}, {"api_name": "logging.info", "line_number": 87, "usage_type": "call"}, {"api_name": "assets.models.RecommendedAssetsToBuy.objects.update_or_create", "line_number": 95, "usage_type": "call"}, {"api_name": "assets.models.RecommendedAssetsToBuy.objects", "line_number": 95, "usage_type": "attribute"}, {"api_name": "assets.models.RecommendedAssetsToBuy", "line_number": 95, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 97, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 97, "usage_type": "attribute"}, {"api_name": "datetime.datetime.today", "line_number": 98, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 98, "usage_type": "attribute"}, {"api_name": "logging.info", "line_number": 112, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 121, "usage_type": "call"}, {"api_name": "code.config.global_variables.URLS_RECOMMENDED_ASSETS", "line_number": 121, "usage_type": "argument"}, {"api_name": "code.config.utils.get_code", "line_number": 122, "usage_type": "call"}]} +{"seq_id": "558497200", "text": "from PIL import Image\nimport sys\nimport numpy as np\nimport os\nimport glob\nimport tqdm\n\ndef read_img(file_path):\n origin = Image.open(file_path)\n width, height = origin.size\n return origin, width, height\ndef oil_painting(origin, radius, Intensity, width, height,output):\n final_img = Image.new(\"RGB\", (width, height), \"white\")\n print('pixel:',origin.getpixel((0,0)))\n print('width, height:', width,height)\n print(\"oil_painting\")\n for nX in range(radius, width - radius):\n for nY in range(radius, height - radius):\n# print('nX, nY: ',nX, nY)\n nSumR = np.zeros((256, ), dtype=np.int)\n nSumG = np.zeros((256, ), dtype=np.int)\n nSumB = np.zeros((256, ), dtype=np.int)\n nIntensityCount = np.zeros((256, ), dtype=np.int)\n for nX_O in range((-1)*radius, radius+1):\n for nY_O in range((-1)*radius, radius+1):\n nR = origin.getpixel((nX + nX_O, nY + nY_O))[0]\n nG = origin.getpixel((nX + nX_O, nY + nY_O))[1]\n nB = origin.getpixel((nX + nX_O, nY + nY_O))[2]\n CurIntensity = min(int((((nR+nG+nB)/3.)*Intensity)//255),255)\n i = CurIntensity\n nIntensityCount[i] += 1\n nSumR[i] += nR\n nSumG[i] += nG\n nSumB[i] += nB\n nMaxIndex = np.argmax(nIntensityCount)\n nCurMax = nIntensityCount[nMaxIndex]\n final_color = []\n final_color.append(int(nSumR[nMaxIndex] // nCurMax))\n final_color.append(int(nSumG[nMaxIndex] // nCurMax))\n final_color.append(int(nSumB[nMaxIndex] // nCurMax))\n final_img.putpixel((nX, nY), tuple(final_color))\n final_img.save(output)\nif __name__ == '__main__':\n #Variables\n input_image_path = './oil_in'\n output_path = './oil_out'\n input_path = glob.glob(os.path.join(input_image_path, '*.jpg'))\n \n for filename in input_path:\n input_filename = filename\n print(input_filename)\n output_filename = os.path.basename(filename)\n origin, width, height = read_img(input_filename)\n print(origin,width,height)\n output= os.path.join(output_path , output_filename)\n oil_painting(origin, 4, 10, width, height,output)\n\n", "sub_path": "Oil_Water_Effect/Oil_painting_new.py", "file_name": "Oil_painting_new.py", "file_ext": "py", "file_size_in_byte": 2337, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "PIL.Image.open", "line_number": 9, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 9, 
"usage_type": "name"}, {"api_name": "PIL.Image.new", "line_number": 13, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 13, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 20, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 22, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 22, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.int", "line_number": 23, "usage_type": "attribute"}, {"api_name": "numpy.argmax", "line_number": 35, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 47, "usage_type": "call"}, {"api_name": "os.path", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 52, "usage_type": "call"}, {"api_name": "os.path", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}]} +{"seq_id": "134159869", "text": "import os\nimport csv\nimport time\nimport dateutil.relativedelta\nimport datetime\nimport os\n\ndef write_to_csv(ping_list, start_time, end_time, difference):\n os.chdir('/app')\n file_time = time.time()\n file_name = \"Ping_test_\" + str(datetime.datetime.fromtimestamp(file_time).strftime('%H:%M:%S-%d-%m-%Y'))\n with open(file_name, 'w', newline='') as csvfile:\n cwriter = csv.writer(csvfile, delimiter='\\n')\n cwriter.writerow(ping_list)\n cwriter.writerow([start_time])\n cwriter.writerow([end_time])\n cwriter.writerow([difference])\n\n\ndef format_data(ping_list, start_time, end_time):\n start_final = \"Start: \"+str(datetime.datetime.utcfromtimestamp(start_time).strftime('%H:%M:%S:%f %d-%m-%Y' ))\n end_final = \"End: \"+str(datetime.datetime.utcfromtimestamp(end_time).strftime('%H:%M:%S:%f %d-%m-%Y' ))\n print(start_final)\n print(end_final)\n start_human = datetime.datetime.fromtimestamp(start_time)\n end_human = datetime.datetime.fromtimestamp(end_time)\n difference = dateutil.relativedelta.relativedelta(end_human, start_human)\n print(difference)\n write_to_csv(ping_list, start_final, end_final, difference)\n\n\ndef main():\n print(\"Starting\") \n run_counter = 5\n #run_counter = input(\"How many runs do you want to perfom? 
\")\n dest_ip = '1.1.1.1'\n x = 1 \n ping_list = []\n start_time = time.time()\n while x <= int(run_counter):\n os.system(\"ping -c 1 \"+dest_ip)\n ping_list.append(\"Ping:\"+str(x)+\":\"+str(time.time()))\n x+=1\n end_time = time.time()\n #print(\"Length \" + str(len(ping_list)))\n print(\"Finished\")\n format_data(ping_list, start_time, end_time)\nmain()\n", "sub_path": "test_framework/files/script.py", "file_name": "script.py", "file_ext": "py", "file_size_in_byte": 1677, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.chdir", "line_number": 9, "usage_type": "call"}, {"api_name": "time.time", "line_number": 10, "usage_type": "call"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 11, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 11, "usage_type": "attribute"}, {"api_name": "csv.writer", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcfromtimestamp", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.datetime.fromtimestamp", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta.relativedelta.relativedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "dateutil.relativedelta.relativedelta", "line_number": 27, "usage_type": "attribute"}, {"api_name": "dateutil.relativedelta", "line_number": 27, "usage_type": "name"}, {"api_name": "time.time", "line_number": 39, "usage_type": "call"}, {"api_name": "os.system", "line_number": 41, "usage_type": "call"}, {"api_name": "time.time", "line_number": 42, "usage_type": "call"}, {"api_name": "time.time", "line_number": 44, "usage_type": "call"}]} +{"seq_id": "261901440", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Jesse Rubin - project Euler\n\"\"\"\nCrack-free Walls\nProblem 215\nConsider the problem of building a wall out of 2×1 and 3×1 bricks\n(horizontal×vertical dimensions) such that, for extra strength, the gaps\nbetween horizontally-adjacent bricks never line up in consecutive layers,\ni.e. 
never form a \"running crack\".\n\nFor example, the following 9×3 wall is not acceptable due to the running crack\nshown in red:\n\n|# # #|# #|# #|# #|\n|# #|# # #|# #|# #|\n|# # #|# # #|# # #|\n\nThere are eight ways of forming a crack-free 9×3 wall, written W(9,3) = 8.\n\nCalculate W(32,10).\n\"\"\"\nfrom __future__ import generators\nfrom bib.maths import disjoint\nfrom bib.decorations import cash_it\nfrom itertools import combinations\nfrom collections import defaultdict\n\n\n@cash_it\ndef brick_cracks(remaining, legos=(2, 3), cur_layer=None):\n    \"\"\"\n\n    Args:\n        remaining (int): Spaces to fill\n        legos (tuple): lego brick sizes to use\n        cur_layer (tuple): current layer\n\n    Yields:\n        tuple: permutations of brick combinations\n\n    \"\"\"\n    if cur_layer is None:\n        for lego in legos:\n            for layer in brick_cracks(remaining-lego, legos, tuple([lego])):\n                yield layer\n    else:\n        for n in [l for l in legos if l <= remaining]:\n            for layer in brick_cracks(remaining-n, legos, tuple(list(cur_layer)+[n])):\n                yield layer\n        if remaining == 0: yield tuple(sum(cur_layer[0:i]) for i in range(1, len(cur_layer)))\n        if remaining == 1: return  # end the generator; raising StopIteration here is a RuntimeError on Python 3.7+ (PEP 479)\n\n\ndef W(width, height):\n    cracks = set(layer for layer in brick_cracks(width)) # crack/brick layers\n    disjoints = defaultdict(set) # disjoint layer combos dictionary\n    for a, b in combinations(cracks, 2): # for each crack combination...\n        if disjoint(a, b): # valid if cracks are disjoint...\n            disjoints[a].add(b) # add b to the disjoint crack-layers of a\n            disjoints[b].add(a) # add a to the disjoint crack-layers of b\n\n    @cash_it # cache / memoization decorator for recursive function\n    def layer_combos(remaining, cur):\n        \"\"\"Count layer combos using the disjoint sets dictionary\"\"\"\n        if remaining == 0: return 1\n        return sum(layer_combos(remaining-1, d) for d in disjoints[cur])\n\n    # return count of all layer combos for each starting layer\n    return sum(layer_combos(height-1, layer) for layer in disjoints)\n\n\ndef p215():\n    return W(32, 10)\n\n\nif __name__ == '__main__':\n    assert 8 == W(9, 3)\n    ANSWER = p215()\n    print(\"CRACK FREE WALLS: {}\".format(ANSWER))\n", "sub_path": "done/py/euler_215.py", "file_name": "euler_215.py", "file_ext": "py", "file_size_in_byte": 2591, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "bib.decorations.cash_it", "line_number": 30, "usage_type": "name"}, {"api_name": "collections.defaultdict", "line_number": 57, "usage_type": "call"}, {"api_name": "itertools.combinations", "line_number": 58, "usage_type": "call"}, {"api_name": "bib.maths.disjoint", "line_number": 59, "usage_type": "call"}, {"api_name": "bib.decorations.cash_it", "line_number": 63, "usage_type": "name"}]} +{"seq_id": "557996547", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 15 14:40:46 2018\n\n@author: user\n\"\"\"\n\n#----------------------------------------------- \n# =============================================================================\n# import tika\n# from tika import parser\n# parsed = parser.from_file(r'C:\\\Users\\\user\\\Downloads\\\Anthem.docx', xmlContent=True)\n# \n# =============================================================================\n\n\nfrom neo4j.v1 import GraphDatabase\n\nuri = \"bolt://localhost:7687\"\ndriver = GraphDatabase.driver(uri, auth=(\"neo4j\", \"Viren@123\"))\n\n\n \ndef createNode(title,parent,prop):\n    \n    with driver.session() as session:\n        \n        with session.begin_transaction() as tx:\n            result=tx.run(\"CREATE (n:Subnode { name 
: {title} , parent : {parent} , prop:{prop} })\",parent=parent,title=title,prop=prop)\n \ndef createMultipleRelations():\n \n with driver.session() as session:\n \n with session.begin_transaction() as tx:\n result=tx.run(\"MATCH (a:Subnode),(b:Subnode) WHERE a.name = b.parent CREATE (a)-[r:type]->(b) RETURN a,b\")\n \n \ndef deleteSelfRelations():\n \n with driver.session() as session:\n \n with session.begin_transaction() as tx:\n result=tx.run(\"MATCH (a:Subnode)-[r:type]->(a:Subnode) Delete r RETURN a\")\n \n\nimport mammoth\nwith open(r\"C:\\Users\\user\\Desktop\\Anthem.docx\", \"rb\") as docx_file:\n result = mammoth.convert_to_html(docx_file)\n html = result.value # The generated HTML\n messages = result.messages # Any messages, such as warnings during conversion\nf = open(r\"C:\\Users\\user\\Desktop\\Anthem.html\",\"w\")\nf.write(html)\n\nfrom bs4 import BeautifulSoup\nsoup = BeautifulSoup(html,'html.parser')\n#soup.prettify()\n\nblacklist = [\"img\",'table']\nfor tag in soup.findAll():\n if tag.name.lower() in blacklist:\n # blacklisted tags are removed in their entirety\n tag.extract()\n\n\n\n\nfrom nltk.tokenize import sent_tokenize,word_tokenize\nfrom nltk.corpus import stopwords\nfrom string import punctuation\nimport pandas as pd\nfrom nltk.stem.snowball import SnowballStemmer\nstemmer = SnowballStemmer(\"english\")\n\n\n\n# =============================================================================\n# \n# key_section={}\n# for i in user_input_words:\n# temp=[]\n# for key,val in d_stem.items(): \n# if i in val:\n# temp.append(key)\n# key_section[i]=temp\n# # =============================================================================\n# =============================================================================\n# for item in word_sent[2:4]:\n# tokenized = nltk.word_tokenize(item)\n# tagged = nltk.pos_tag(tokenized)\n# print(tagged)\n# \n# \n# =============================================================================\n\nall_h1=soup.find_all('h1')\n\nd_h1={}\nfor h1 in all_h1:\n first=h1\n second=first.find_next('h1')\n temp=\"\"\n \n while first.findNext()!=second:\n if str(first.findNext().text) != \"\":\n temp+= str(first.findNext())+\" \"\n first=first.findNext()\n #if temp!=\" Please see “Therapy Services” later in this section. \" and temp!=' See “Therapy Services” later in this section. ' and temp!=\"\":\n temp=temp.replace('.',\" . \")\n temp=temp.replace('(',\" ( \")\n temp=temp.replace(')',\" ) \")\n temp=temp.replace('<',\" <\")\n temp=temp.replace('>',\"> \")\n d_h1[str(h1.text)]=temp\n\n\nd_h2={}\n\nfor h1,h1_val in d_h1.items():\n d_unio={}\n soup2 = BeautifulSoup(h1_val,'html.parser')\n all_h2=soup2.find_all('h2')\n if len(all_h2)!=0 :\n for h2 in all_h2:\n first=h2\n second=first.find_next('h2')\n temp=\"\"\n \n while first.findNext()!=second:\n if str(first.findNext().text) != \"\":\n temp+= str(first.findNext())+\" \"\n first=first.findNext()\n #if temp!=\" Please see “Therapy Services” later in this section. \" and temp!=' See “Therapy Services” later in this section. ' and temp!=\"\":\n temp=temp.replace('.',\" . 
\")\n d_unio[h2.text]=temp\n d_h2[str(h1)]=d_unio\n else:\n d_h2[str(h1)]={str(h1):h1_val}\n print(\"--------------------\")\n\n\n \n\nfor item,value in d_h2.items():\n print(item,\"\\n\",value,'-----------------','\\n') \n \n\n\n\nd_kwds={}\nfor key,val in d_h2.items():\n# print(val,\"\\n\\n\\n\")\n for key2,val2 in val.items():\n# print(\"dsadad\\n\",key2)\n if val2 is not None:\n sents = sent_tokenize(val2)\n word_sent = word_tokenize(val2.lower())\n _stopwords = set(stopwords.words('english') + list(punctuation)+['·',\n '’',\n '“',\n '”'])\n section_words=[word for word in word_sent if word not in _stopwords]\n d_kwds[key2]=list(word for word in set(section_words) if (word.isalpha() and len(word)>2 and len(set(word))!=1))\n \n \n \n \nd_stem={}\nfor key,val in d_kwds.items():\n singles = [stemmer.stem(i) for i in val]\n temp=set(singles)\n d_stem[key]=list(temp)\n \n\n\nbag_of_words2=[]\nfor i,j in d_stem.items():\n bag_of_words2.append(j)\nnew_bag=[]\nfor list_word in bag_of_words2:\n for i in list_word:\n if i.isalpha():\n new_bag.append(i) \nnew_bag=list(set(new_bag))\n \n \n\n\n \n \n#user_input=input(\"please enter your query:\\n\")\nuser_input=\"am i eligible for wheelchair benefits\"\nsents = sent_tokenize(user_input)\nword_sent = word_tokenize(user_input.lower())\n_stopwords = set(stopwords.words('english') + list(punctuation))\nuser_input_words=[word for word in word_sent if word not in _stopwords]\nuser_input_words=[stemmer.stem(i) for i in user_input_words]\nuser_input_words=[i for i in user_input_words if (i in new_bag and i not in ('claim','want','know','cover','elig'))]\n\n\nkey_section={}\nfor i in user_input_words:\n temp=[]\n for key,val in d_stem.items(): \n if i in val:\n temp.append(key)\n key_section[i]=set(temp)\n\n\nif len(key_section.keys())!=0: \n final_cmn_section= list(set.intersection(*map(set,key_section.values())))\n\n\n\n\n\n#for i in final_cmn_section:\n# #print(i)\n# if len(d_ms[i].split())>250:\n# sents = sent_tokenize(d_ms[i])\n# #print(sents)\n# for sen in sents:\n# checklist=sen.lower().split()\n# checklist=[stemmer.stem(y) for y in checklist]\n# \n# #print(checklist,\"\\n\")\n# if all(x in checklist for x in user_input_words):\n# print(sen,\"\\n\")\n# else: \n# print(i,\"\\n\",d_ms[i],\"\\n\\n\\n\")\n\n# \nprint(\"Parents are-- \\n\",final_cmn_section)\n\n\n\n\n#def find_parent_data(lis,kwds):\n## print(kwds,lis)\n# for i in lis:\n# for key,val in d_h2.items():\n# for key2,val2 in val.items():\n# if i==key2:\n## print(\"---------\",key2)\n# soup4= BeautifulSoup(val2,'html.parser')\n# ps=soup4.find_all('p')\n# for p1 in ps:\n# checklist=(str(p1)).lower().split()\n# checklist=[stemmer.stem(y) for y in checklist]\n## print(checklist)\n## print(p1)\n# if any(x in checklist for x in kwds):\n# print(p1,\"\\n\")\n## else:\n## print(\"else\")\n## print(\"\"+key2+\" :
\\n\")\n## print(val2,\"\\n\\n\")\n# \n\n\ndef find_parent_data(lis,kwds):\n# print(kwds,lis)\n res=\"\"\n for i in lis:\n for key,val in d_h2.items():\n for key2,val2 in val.items():\n if i==key2:\n# print(\"---------\",key2)\n# print(\"---\",key2)\n soup4= BeautifulSoup(val2,'html.parser')\n ps=soup4.find_all('p')\n for p1 in ps:\n# print(p1)\n checklist=str(p1).lower().split()\n checklist=[stemmer.stem(y) for y in checklist]\n# print(checklist)\n# print(p1)\n if all(x in checklist for x in kwds):\n# print(p1,\"\\n\")\n res+=str(p1.text)+\"
\"+\"first if\"\n# print(\"iiffififif\")\n else:\n if any(x in checklist for x in kwds):\n res+=str(p1.text)+\"
\"\n \n return res \n# if len(res.strip())==0:\n# for p1 in ps:\n# checklist=str(p1).lower().split()\n# checklist=[stemmer.stem(y) for y in checklist]\n## print(checklist)\n## print(p1)\n# if any(x in checklist for x in kwds):\n## print(p1,\"\\n\")\n# res+=\"dadsad\"+str(p1)+\"
\"+\"ddd\"\n \n\nres=find_parent_data(final_cmn_section,user_input_words) \n#print(res)\n\n\nsoupt = BeautifulSoup(html,'html.parser')\ntrows = soupt.find_all('tr')\n\nif len(final_cmn_section)==0:\n print(\"our customer representative will contact you shortly\")\nelse:\n for i in final_cmn_section:\n temp=i.split('and')\n# print(temp)\n i=temp[0]\n print(i)\n for tr in trows: \n# print(tr)\n if \"See\" in tr.text and i.strip() in tr.text:\n print(\"see sectuibn\")\n continue\n \n elif i.strip() in tr.text:\n section_tr=tr\n print(\"\\n\\n------\\n\\n\",section_tr)\n \n \n \n\n\nprint(d_h2) \n\nmain_counter=0\nfor key,value in d_h2.items():\n main_counter+=1\n print(str(main_counter)+\"out of \"+str(len(d_h2)))\n parent=key.strip()\n createNode(parent,\"root\",\"\")\n for key1,value1 in value.items():\n createNode(key1.strip(),parent,value1) \n \n \ncreateNode(\"root\",\"\",\"\") ## create root node\ncreateMultipleRelations()\ndeleteSelfRelations()\n \n \n", "sub_path": "pROJ/nEO4J.py", "file_name": "nEO4J.py", "file_ext": "py", "file_size_in_byte": 10257, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "neo4j.v1.GraphDatabase.driver", "line_number": 20, "usage_type": "call"}, {"api_name": "neo4j.v1.GraphDatabase", "line_number": 20, "usage_type": "name"}, {"api_name": "mammoth.convert_to_html", "line_number": 49, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "nltk.stem.snowball.SnowballStemmer", "line_number": 73, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 121, "usage_type": "call"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 156, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 157, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 158, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 158, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 158, "usage_type": "argument"}, {"api_name": "nltk.tokenize.sent_tokenize", "line_number": 193, "usage_type": "call"}, {"api_name": "nltk.tokenize.word_tokenize", "line_number": 194, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 195, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 195, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 195, "usage_type": "argument"}, {"api_name": "bs4.BeautifulSoup", "line_number": 270, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 302, "usage_type": "call"}]} +{"seq_id": "108526452", "text": "# -*- coding: utf-8 -*-\nfrom datetime import datetime\n\n\nclass LedgerEntry(object):\n def __init__(self):\n self.date = None\n self.description = None\n self.change = None\n\n def __lt__(self, other):\n if self.date < other.date:\n return True\n elif self.description < other.description:\n return True\n else:\n return self.change < other.change\n\n\ndef create_entry(date, description, change):\n entry = LedgerEntry()\n entry.date = datetime.strptime(date, '%Y-%m-%d')\n entry.description = description\n entry.change = change\n return entry\n\n\ndef format_entries(currency, locale, entries):\n table = create_table(locale)\n\n for entry in sorted(entries):\n date_fmt = format_date(entry, locale)\n desc_fmt = format_description(entry)\n change_fmt = format_change(currency, entry, locale)\n table += '\\n{} | {} | 
{}'.format(date_fmt, desc_fmt, change_fmt)\n return table\n\ndef format_change(currency, entry, locale):\n # Write entry change to table\n currency_symbol = {'USD': '$', 'EUR': u'€'}\n change_str, change_fmt = '{:>13}', ''\n if locale == 'en_US':\n left, right = ('', ' ') if entry.change >= 0 else ('(', ')')\n change_fmt = '{}{}{:0,.2f}{}'.format(left,\n currency_symbol[currency],\n abs(entry.change/100),\n right)\n elif locale == 'nl_NL':\n change_fmt = '{} {:0,.2f} '.format(currency_symbol[currency],\n entry.change/100)\n change_fmt = change_fmt.translate(str.maketrans(',.', '.,'))\n return change_str.format(change_fmt)\n\n\ndef format_description(entry):\n # Write entry description to table and Truncate if necessary\n if len(entry.description) > 25:\n return entry.description[:22] + '...'\n else:\n return '{:<25}'.format(entry.description)\n\n\ndef format_date(entry, lang):\n # Write entry date to table\n if lang == 'en_US':\n return f'{entry.date.month:02}/{entry.date.day:02}/{entry.date.year:04}'\n else:\n return f'{entry.date.day:02}-{entry.date.month:02}-{entry.date.year:04}'\n\n\ndef create_table(locale):\n # Generate Header Row\n lang_to_table = {'en_US': ['Date', 'Description', 'Change'],\n 'nl_NL': ['Datum', 'Omschrijving', 'Verandering']}\n pattern = '{:<11}| {:<26}| {:<13}'\n table = pattern.format(*lang_to_table[locale])\n return table", "sub_path": "python/ledger/ledger.py", "file_name": "ledger.py", "file_ext": "py", "file_size_in_byte": 2555, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.strptime", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}]} +{"seq_id": "156431321", "text": "#pdftk A=\"${INFILE}\" cat A\"${PAGE}\" output \"${OUTFILE}\"\n#pdfcrop --margins \"3 3 3 3\" \"${OUTFILE}\"\n## Times-Roman --> LinBiolinum\n#sed -i -e 's/Times-Roman/LinBiolinum/g' \"${OUTFILE}\"\n#gs -sFONTPATH=/project/bfys/jleerdam/local/root/fonts -o \"${OUTFILE}\" -sDEVICE=pdfwrite -dPDFSETTINGS=/prepress \"${OUTFILE}\"\n#gs -o \"${OUTFILE}\" -sDEVICE=pdfwrite -sProcessColorModel=DeviceCMYK -sColorConversionStrategy=CMYK -sColorConversionStrategyForImages=CMYK \"${OUTFILE}\"\n\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument( '--inputFile', '-i', required = True )\nparser.add_argument( '--outputFile', '-o' )\nparser.add_argument( '--pages', '-p', nargs = '+', type = int, default = [ 1 ] )\nparser.add_argument( '--fontPath', '-f', default = 'fonts' )\nparser.add_argument( '--fontName', '-n', default = 'LinBiolinum' )\nparser.add_argument( '--margins', '-m', nargs = '+', type = int, default = [ 3, 3, 3, 3 ] )\n\nargs = parser.parse_args()\nassert args.inputFile.endswith('.pdf')\noutFile = args.outputFile if args.outputFile else args.inputFile\nassert outFile.endswith('.pdf')\nassert len( args.pages ) > 0\nassert len( args.margins ) == 4\n\nimport os, subprocess, tempfile\ntmp = tempfile.NamedTemporaryFile()\ntmpFile = os.path.realpath(tmp.name)\ntmp.close()\nfor page in args.pages :\n subprocess.call( [ 'pdftk', 'A=%s' % args.inputFile, 'cat', 'A%d' % page, 'output', tmpFile ] )\n subprocess.call( [ 'pdfcrop', '--margins', '%d %d %d %d' % tuple( args.margins ), tmpFile, tmpFile ] )\n subprocess.call( [ 'sed', '-i', '-e', 's/Times-Roman/%s/g' % args.fontName, tmpFile ] )\n subprocess.call( [ 'gs', '-sFONTPATH=%s' % args.fontPath, '-sDEVICE=pdfwrite', '-dPDFSETTINGS=/prepress', '-o', outFile\n , tmpFile ] 
)\n subprocess.call( [ 'rm', tmpFile ] )\n", "sub_path": "PhysFit/P2VV/scripts/processROOTPlot.py", "file_name": "processROOTPlot.py", "file_ext": "py", "file_size_in_byte": 1788, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 9, "usage_type": "call"}, {"api_name": "tempfile.NamedTemporaryFile", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path.realpath", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "subprocess.call", "line_number": 29, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 30, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 31, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 32, "usage_type": "call"}, {"api_name": "subprocess.call", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "379270298", "text": "'''\nIn this part, we will use PCA to decompose.\nTo be continued\n'''\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport statsmodels.api as sm\nfrom statsmodels.multivariate.pca import PCA \n\ndta = '''\nto be continued\n'''\n\npca_model = PCA(dta.T, standardize=False, demean=True)\n\nfig = pca_model.plot_scree(log_scale=False)\n\nfig, ax = plt.subplots(figsize=(8, 4))\nlines = ax.plot(pca_model.factors.iloc[:,:3], lw=4, alpha=.6)\nax.set_xticklabels(dta.columns.values[::10])\nax.set_xlim(0, 51)\nax.set_xlabel(\"Year\", size=17)\nfig.subplots_adjust(.1, .1, .85, .9)\nlegend = fig.legend(lines, ['PC 1', 'PC 2', 'PC 3'], loc='center right')\nlegend.draw_frame(False)\n\nidx = pca_model.loadings.iloc[:,0].argsort()\n\ndef make_plot(labels):\n fig, ax = plt.subplots(figsize=(9,5))\n ax = dta.loc[labels].T.plot(legend=False, grid=False, ax=ax)\n dta.mean().plot(ax=ax, grid=False, label='Mean')\n ax.set_xlim(0, 51)\n fig.subplots_adjust(.1, .1, .75, .9)\n ax.set_xlabel(\"Year\", size=17)\n ax.set_ylabel(\"Fertility\", size=17)\n legend = ax.legend(*ax.get_legend_handles_labels(), loc='center left', bbox_to_anchor=(1, .5))\n legend.draw_frame(False)\n\n \nlabels = dta.index[idx[-5:]]\nmake_plot(labels)\n\nidx = pca_model.loadings.iloc[:,1].argsort()\nmake_plot(dta.index[idx[-5:]])\n\n\nmake_plot(dta.index[idx[:5]])\n\nfig, ax = plt.subplots()\npca_model.loadings.plot.scatter(x='comp_00',y='comp_01', ax=ax)\nax.set_xlabel(\"PC 1\", size=17)\nax.set_ylabel(\"PC 2\", size=17)\ndta.index[pca_model.loadings.iloc[:, 1] > .2].values", "sub_path": "Final Assignment Code/PCA.py", "file_name": "PCA.py", "file_ext": "py", "file_size_in_byte": 1544, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "statsmodels.multivariate.pca.PCA", "line_number": 15, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 19, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}]} +{"seq_id": "451238878", "text": "# -*- encoding: utf-8 -*-\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in 
compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport enum\nimport json\n\n\n_JSON_TYPES = {}\n\n\ndef register_type(enum_cls):\n if enum_cls.__name__ in _JSON_TYPES:\n raise RuntimeError(f\"{enum_cls.__name__} already registered\")\n else:\n _JSON_TYPES[enum_cls.__name__] = enum_cls\n\n\nclass Encoder(json.JSONEncoder):\n def default(self, v):\n if isinstance(v, enum.Enum):\n return {\n \"__pytype__\": \"enum\",\n \"class\": type(v).__name__,\n \"name\": v.name,\n }\n else:\n return super().default(v)\n\n\ndef decode_enum(v):\n if v.get(\"__pytype__\") == \"enum\":\n cls_name = v[\"class\"]\n enum_cls = _JSON_TYPES[cls_name]\n enum_name = v[\"name\"]\n return enum_cls[enum_name]\n return v\n\n\ndef dumps(v):\n return json.dumps(v, cls=Encoder)\n\n\ndef loads(v):\n return json.loads(v, object_hook=decode_enum)\n", "sub_path": "mergify_engine/json.py", "file_name": "json.py", "file_ext": "py", "file_size_in_byte": 1445, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "json.JSONEncoder", "line_number": 28, "usage_type": "attribute"}, {"api_name": "enum.Enum", "line_number": 30, "usage_type": "attribute"}, {"api_name": "json.dumps", "line_number": 50, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "29628313", "text": "\"\"\"Script to compute embedded space of desired data and save to disk.\r\n\r\nThis script uses TSNE to project some predefined data into 2D. In the end, the 2D data will be\r\nplotted. The plot and the 2D data (as .npy) will be stored to disk. Input and output data are\r\ndefined by default in settings/scripts/compute_embedded_space.yaml.\r\n\r\n\"\"\"\r\nimport logging\r\n\r\nimport tqdm\r\nimport numpy as np\r\nimport pandas as pd\r\n\r\nfrom lib.manifold.tsne import TSNE\r\nfrom lib.config.general_config import Config\r\nfrom lib.util.matplotblib_util import save_scatter_plot_with_classes\r\nfrom lib.util.logging_util import configure_logging_verbosity\r\nfrom lib.util.argparse_util import default_config_parse\r\n\r\nLOGGER = logging.getLogger(__name__)\r\n\r\n\r\ndef _load_data_from_directory(feature_path, type_container=None):\r\n \"\"\"Load data from directory and prepare.\r\n\r\n Args:\r\n feature_path (Str): Path to bounding box features.\r\n type_container (pandas.DataFrame or None): pandas Dataframe holding the types of whole\r\n dataset. Index must match the file names in feature_path.\r\n\r\n Returns:\r\n numpy.ndarray(numpy.float): Features of bounding boxes in size [n_samples, n_features]\r\n types (str): Class type refering to each sample. Only returned if type_container is not None\r\n identifier (str): Uniqie label identifier. 
Only returned if type_container is not None.\r\n\r\n \"\"\"\r\n data = []\r\n file_list = list(feature_path.glob('*'))\r\n for path_to_data in tqdm.tqdm(file_list, desc='Load features'):\r\n data.append(np.load(path_to_data))\r\n\r\n data = np.array(data)\r\n\r\n output = data\r\n\r\n if type_container is not None:\r\n identifier = np.array([file.stem for file in file_list])\r\n types = np.array([type_container.loc[file.stem] for file in file_list])\r\n output = (data, types, identifier)\r\n\r\n return output\r\n\r\n\r\ndef save_embedded_features(feature_path, label_path, output_path, output_plot_path):\r\n \"\"\"Perform TSNE and save features.\r\n\r\n Args:\r\n feature_path (str or pathlib.Path): Path to box features.\r\n label_path (str or pathlib.Path): Path to pickled pandas Data Frame of labels.\r\n output_path (str or pathlib.Path or None): Path to save embedded features to. Does not save\r\n if None.\r\n output_plot_path (str or pathlib.Path or None): Path to save plot to. Does not save if None.\r\n\r\n \"\"\"\r\n LOGGER.info(\"Save embedded features ... \")\r\n type_container = pd.read_pickle(str(label_path))['type']\r\n data, types, _ = _load_data_from_directory(feature_path, type_container)\r\n\r\n # Normalize data\r\n data -= np.mean(data)\r\n data /= np.std(data)\r\n embedded_space = TSNE().fit(data)\r\n if output_path:\r\n np.save(output_path, embedded_space)\r\n\r\n if output_plot_path:\r\n class_color_dict = {\r\n 'Car': 'b',\r\n 'Van': 'g',\r\n 'Truck': 'r',\r\n 'Pedestrian': 'c',\r\n 'Person_sitting': 'm',\r\n 'Cyclist': 'k',\r\n 'Tram': 'y'\r\n }\r\n\r\n save_scatter_plot_with_classes(output_path=output_plot_path,\r\n types=types,\r\n data=embedded_space,\r\n class_color_dict=class_color_dict)\r\n\r\n\r\ndef _main():\r\n \"\"\"Main script.\"\"\"\r\n args = default_config_parse(default_config_path='settings/scripts/compute_embedded_space.yaml')\r\n configure_logging_verbosity(verbose=args.verbose)\r\n config = Config.build_from_yaml(args.config)\r\n save_embedded_features(**config.config)\r\n\r\n\r\nif __name__ == '__main__':\r\n _main()\r\n", "sub_path": "scripts/compute_embedded_space.py", "file_name": "compute_embedded_space.py", "file_ext": "py", "file_size_in_byte": 3651, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 20, "usage_type": "call"}, {"api_name": "tqdm.tqdm", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.load", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 47, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 48, "usage_type": "call"}, {"api_name": "pandas.read_pickle", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 71, "usage_type": "call"}, {"api_name": "lib.manifold.tsne.TSNE", "line_number": 72, "usage_type": "call"}, {"api_name": "numpy.save", "line_number": 74, "usage_type": "call"}, {"api_name": "lib.util.matplotblib_util.save_scatter_plot_with_classes", "line_number": 87, "usage_type": "call"}, {"api_name": "lib.util.argparse_util.default_config_parse", "line_number": 95, "usage_type": "call"}, {"api_name": "lib.util.logging_util.configure_logging_verbosity", "line_number": 96, "usage_type": "call"}, {"api_name": "lib.config.general_config.Config.build_from_yaml", "line_number": 97, "usage_type": 
"call"}, {"api_name": "lib.config.general_config.Config", "line_number": 97, "usage_type": "name"}]} +{"seq_id": "114627780", "text": "from flask import Flask\nfrom flask.ext.sqlalchemy import SQLAlchemy\nfrom flask import jsonify\nimport os\n\napp = Flask(__name__)\napp.config.from_object(os.environ['APP_SETTINGS'])\ndb = SQLAlchemy(app)\n\n\nfrom super_star_model import *\n\n@app.route('/')\ndef list_all():\n super_stars = Super_Star.query.all()\n names = [s.name for s in super_stars]\n return jsonify(name=names) \n\n@app.route('/')\ndef basic_stats(name):\n super_star = Super_Star.query.filter_by(name=name).first()\n return jsonify(name=super_star.name,\n height=super_star.height,\n weight=super_star.weight)\n\nif __name__ == '__main__':\n app.run()\n\n\n", "sub_path": "WWEAPI/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 664, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 7, "usage_type": "attribute"}, {"api_name": "flask.ext.sqlalchemy.SQLAlchemy", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 17, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "209437787", "text": "# coding:utf-8\n\nimport wx\nfrom common import CmdID, PostEvent, my_EVT_COMMAND_EVENT, MessageBox\nfrom transport import SSHExecuteError\nfrom os_win import os_ext\nfrom HostOps import get_global_host\n\n\nclass myToolBar(wx.ToolBar):\n\n def __init__(self, parent):\n\n wx.ToolBar.__init__(self, parent, wx.ID_ANY, style=wx.TB_FLAT | wx.TB_HORIZONTAL)\n\n self.host = get_global_host()\n\n bmp1 = wx.Bitmap(wx.Image(\"res/imageres_25.ico\").Scale(32, 32))\n bmp2 = wx.Bitmap(wx.Image(\"res/imageres_185.ico\").Scale(32, 32))\n bmp3 = wx.Bitmap(wx.Image(\"res/imageres_5303.ico\").Scale(32, 32))\n bmp4 = wx.Bitmap(wx.Image(\"res/imageres_171.ico\").Scale(32, 32))\n bmp5 = wx.Bitmap(wx.Image(\"res/imageres_184.ico\").Scale(32, 32))\n\n self.edit = wx.TextCtrl(self, wx.ID_ANY, style=wx.TE_PROCESS_ENTER, pos=(335, 3), size=(400, 32))\n\n self.AddTool(CmdID.connect.value, \"\", bmp1, shortHelp=\"连接远程主机\")\n self.AddTool(CmdID.path_parent.value, \"\", bmp2, shortHelp=\"上一级目录\")\n self.AddTool(CmdID.path_back.value, \"\", bmp3, shortHelp=\"回退目录\")\n self.AddTool(CmdID.path_forward.value, \"\", bmp4, shortHelp=\"前进目录\")\n self.AddTool(CmdID.path_list.value, \"\", bmp5, shortHelp=\"历史目录\")\n self.AddControl(self.edit)\n\n self.Realize()\n\n self.Bind(wx.EVT_TOOL, self.OnButton0, id=CmdID.connect.value)\n self.Bind(wx.EVT_MENU, self.OnCommand, id=CmdID.path_parent.value, id2=CmdID.path_list2.value)\n self.Bind(my_EVT_COMMAND_EVENT, self.OnCommand)\n\n self.Bind(wx.EVT_TEXT_ENTER, self.OnEditEnter, self.edit)\n\n def OnButton0(self, evt): # 连接\n\n hosts = self.host.get_host_list()\n menu = wx.Menu()\n\n for i, x in enumerate(hosts):\n menu.Append(CmdID.connect1.value + i, x)\n\n hwnd = self.GetHandle()\n rc = os_ext.Toolbar_GetItemRect(hwnd, 0)\n pt = rc[0], rc[1] + rc[3]\n\n self.PopupMenu(menu, pt)\n\n def OnButton4(self): # 历史\n plist, pos = self.host.get_path_list()\n\n menu = wx.Menu()\n\n for i, x in enumerate(plist):\n mi = menu.AppendCheckItem(CmdID.path_list1.value + i, x)\n if i == pos:\n mi.Check()\n\n hwnd = self.GetHandle()\n rc = os_ext.Toolbar_GetItemRect(hwnd, 4)\n pt = rc[0], rc[1] + rc[3]\n\n self.PopupMenu(menu, 
pt)\n\n    def OnCommand(self, evt): # path switching\n        cmd = evt.GetId()\n\n        if cmd == CmdID.path_list.value:\n            self.OnButton4()\n            return\n\n        # How the directory history works:\n        # 1. The history exists for quick switching; it stores no duplicate directories and does not preserve the original visit order.\n        # 2. A directory that was visited before is moved to the end of the list when visited again.\n        # 3. Going back once switches to the previous entry in the list (if any).\n        # 4. Going forward once switches to the next entry in the list (if any).\n        # 5. Clicking a history entry switches to it directly.\n        # 6. Switching via 3, 4 or 5 keeps the current directory at its original position in the list.\n        # 7. If the current position is in the middle of the list (e.g. after 3, 4 or 5), opening a new directory overwrites the entries after it.\n\n        try:\n            if cmd == CmdID.path_parent.value: # parent path\n                self.host.path_parent()\n\n            elif cmd == CmdID.path_back.value: # go back\n                self.host.path_back()\n\n            elif cmd == CmdID.path_forward.value: # go forward\n                self.host.path_forward()\n\n            elif cmd == CmdID.path_set.value: # set the path from the edit box, or open a path by double-click\n                self.host.path_set(evt.path)\n\n            else: # switch via a history entry\n                pidx = cmd - CmdID.path_list1.value\n                self.host.path_switch(pidx)\n\n        except SSHExecuteError as e:\n            MessageBox(self, 'Error', str(e), wx.ICON_ERROR)\n\n        ds = self.host.get_path_data()\n        PostEvent(CmdID.path_load, path=self.host.get_current_path(), data=ds)\n\n    def OnEditEnter(self, evt):\n        path = self.edit.GetLineText(0)\n        PostEvent(CmdID.path_set, self, path=path)\n\n    def set_path(self, path):\n        self.edit.SetLabel(path)\n\n\nif __name__ == \"__main__\":\n    pass\n", "sub_path": "python/CtrlWnd.py", "file_name": "CtrlWnd.py", "file_ext": "py", "file_size_in_byte": 4309, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "wx.ToolBar", "line_number": 10, "usage_type": "attribute"}, {"api_name": "wx.ToolBar.__init__", "line_number": 14, "usage_type": "call"}, {"api_name": "wx.ToolBar", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.ID_ANY", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.TB_FLAT", "line_number": 14, "usage_type": "attribute"}, {"api_name": "wx.TB_HORIZONTAL", "line_number": 14, "usage_type": "attribute"}, {"api_name": "HostOps.get_global_host", "line_number": 16, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 18, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 18, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 19, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 19, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 20, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 20, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 21, "usage_type": "call"}, {"api_name": "wx.Bitmap", "line_number": 22, "usage_type": "call"}, {"api_name": "wx.Image", "line_number": 22, "usage_type": "call"}, {"api_name": "wx.TextCtrl", "line_number": 24, "usage_type": "call"}, {"api_name": "wx.ID_ANY", "line_number": 24, "usage_type": "attribute"}, {"api_name": "wx.TE_PROCESS_ENTER", "line_number": 24, "usage_type": "attribute"}, {"api_name": "common.CmdID.connect", "line_number": 26, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 26, "usage_type": "name"}, {"api_name": "common.CmdID.path_parent", "line_number": 27, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 27, "usage_type": "name"}, {"api_name": "common.CmdID.path_back", "line_number": 28, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 28, "usage_type": "name"}, {"api_name": "common.CmdID.path_forward", "line_number": 29, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 29, "usage_type": "name"}, {"api_name": "common.CmdID.path_list", "line_number": 30, "usage_type": "attribute"}, 
{"api_name": "common.CmdID", "line_number": 30, "usage_type": "name"}, {"api_name": "wx.EVT_TOOL", "line_number": 35, "usage_type": "attribute"}, {"api_name": "common.CmdID.connect", "line_number": 35, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 35, "usage_type": "name"}, {"api_name": "wx.EVT_MENU", "line_number": 36, "usage_type": "attribute"}, {"api_name": "common.CmdID.path_parent", "line_number": 36, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 36, "usage_type": "name"}, {"api_name": "common.CmdID.path_list2", "line_number": 36, "usage_type": "attribute"}, {"api_name": "common.my_EVT_COMMAND_EVENT", "line_number": 37, "usage_type": "argument"}, {"api_name": "wx.EVT_TEXT_ENTER", "line_number": 39, "usage_type": "attribute"}, {"api_name": "wx.Menu", "line_number": 44, "usage_type": "call"}, {"api_name": "common.CmdID.connect1", "line_number": 47, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 47, "usage_type": "name"}, {"api_name": "os_win.os_ext.Toolbar_GetItemRect", "line_number": 50, "usage_type": "call"}, {"api_name": "os_win.os_ext", "line_number": 50, "usage_type": "name"}, {"api_name": "wx.Menu", "line_number": 58, "usage_type": "call"}, {"api_name": "common.CmdID.path_list1", "line_number": 61, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 61, "usage_type": "name"}, {"api_name": "os_win.os_ext.Toolbar_GetItemRect", "line_number": 66, "usage_type": "call"}, {"api_name": "os_win.os_ext", "line_number": 66, "usage_type": "name"}, {"api_name": "common.CmdID.path_list", "line_number": 74, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 74, "usage_type": "name"}, {"api_name": "common.CmdID.path_parent", "line_number": 88, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 88, "usage_type": "name"}, {"api_name": "common.CmdID.path_back", "line_number": 91, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 91, "usage_type": "name"}, {"api_name": "common.CmdID.path_forward", "line_number": 94, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 94, "usage_type": "name"}, {"api_name": "common.CmdID.path_set", "line_number": 97, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 97, "usage_type": "name"}, {"api_name": "common.CmdID.path_list1", "line_number": 101, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 101, "usage_type": "name"}, {"api_name": "transport.SSHExecuteError", "line_number": 104, "usage_type": "name"}, {"api_name": "common.MessageBox", "line_number": 105, "usage_type": "call"}, {"api_name": "wx.ICON_ERROR", "line_number": 105, "usage_type": "attribute"}, {"api_name": "common.PostEvent", "line_number": 108, "usage_type": "call"}, {"api_name": "common.CmdID.path_load", "line_number": 108, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 108, "usage_type": "name"}, {"api_name": "common.PostEvent", "line_number": 112, "usage_type": "call"}, {"api_name": "common.CmdID.path_set", "line_number": 112, "usage_type": "attribute"}, {"api_name": "common.CmdID", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "653511976", "text": "#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom lxml.html import document_fromstring\nimport re\nfrom helpers.exceptions import UrlParseError\n\ndomainUri = 'http://eatmanga.me'\n\n\ndef get_main_content(url, get=None, post=None):\n name = get_manga_name(url)\n return 
get('{}/Manga-Scan/{}'.format(domainUri, name))\n\n\ndef get_volumes(content=None, url=None, get=None, post=None):\n parser = document_fromstring(content).cssselect('#updates li a[href^=\"/M\"]')\n return [domainUri + i.get('href') for i in parser]\n\n\ndef get_archive_name(volume, index: int = None):\n name = get_manga_name(volume)\n idx = volume.strip('/').rfind('/')\n if idx < 0:\n return 'vol_{:0>3}'.format(index)\n return volume[idx + len(name) + 2:-1]\n\n\ndef __get_img(parser):\n img = parser.cssselect('#eatmanga_image_big')\n if not len(img):\n img = parser.cssselect('#eatmanga_image')\n if not len(img):\n return False\n return img[0].get('src')\n\n\ndef get_images(main_content=None, volume=None, get=None, post=None):\n content = get(volume)\n parser = document_fromstring(content)\n images = [__get_img(parser)]\n result = parser.cssselect('#pages')[0].cssselect('option[value*=\"page-\"]')\n pages_list = [domainUri + i.get('value') for i in result]\n\n for page in pages_list:\n content = get(page)\n img = __get_img(document_fromstring(content))\n if img:\n images.append(img)\n else:\n pass\n return images\n\n\ndef get_manga_name(url, get=None):\n name = re.search('\\\\.me/(?:upcoming/)?(?:Manga-Scan/)?([^/]+)', url)\n if not name:\n raise UrlParseError()\n return name.groups()[0]\n", "sub_path": "providers/eatmanga_me.py", "file_name": "eatmanga_me.py", "file_ext": "py", "file_size_in_byte": 1661, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "lxml.html.document_fromstring", "line_number": 17, "usage_type": "call"}, {"api_name": "lxml.html.document_fromstring", "line_number": 40, "usage_type": "call"}, {"api_name": "lxml.html.document_fromstring", "line_number": 47, "usage_type": "call"}, {"api_name": "re.search", "line_number": 56, "usage_type": "call"}, {"api_name": "helpers.exceptions.UrlParseError", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "299949881", "text": "import pydot\nimport StringIO\nimport objgraph\n\na = 1\nb = {\"2\": a, \"3\": a+1}\nd = [a, b, [a, b], []]\ndot = StringIO.StringIO()\nobjgraph.show_backrefs([d], output=dot)\ngraph = pydot.graph_from_dot_data(dot.getvalue())[0]\ngraph.write_pdf('graph.pdf')\n\n", "sub_path": "devtools/objgraph_learn.py", "file_name": "objgraph_learn.py", "file_ext": "py", "file_size_in_byte": 247, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "StringIO.StringIO", "line_number": 8, "usage_type": "call"}, {"api_name": "objgraph.show_backrefs", "line_number": 9, "usage_type": "call"}, {"api_name": "pydot.graph_from_dot_data", "line_number": 10, "usage_type": "call"}]} +{"seq_id": "404348595", "text": "import cv2\nimport os\nimport numpy as np\n\ndef handle(data, path, file, ext):\n\tfile = file + '-edge' + ext\n\tfilename = os.path.join(path, file)\n\tif os.path.exists(filename):\n\t\tprint('%s exists!' 
% filename)\n\t\treturn []\n\timage = np.asarray(bytearray(data), dtype=\"uint8\")\n\timage = cv2.imdecode(image, cv2.IMREAD_GRAYSCALE)\n\timage = cv2.adaptiveThreshold(image, 255,\n cv2.ADAPTIVE_THRESH_MEAN_C,\n cv2.THRESH_BINARY,\n blockSize=9,\n C=2)\n\tcv2.imwrite(filename, image)\n\treturn [filename]", "sub_path": "Crawler/Handlers/edge.py", "file_name": "edge.py", "file_ext": "py", "file_size_in_byte": 620, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.join", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.imdecode", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "cv2.adaptiveThreshold", "line_number": 13, "usage_type": "call"}, {"api_name": "cv2.ADAPTIVE_THRESH_MEAN_C", "line_number": 14, "usage_type": "attribute"}, {"api_name": "cv2.THRESH_BINARY", "line_number": 15, "usage_type": "attribute"}, {"api_name": "cv2.imwrite", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "19957338", "text": "\"\"\"\nType annotations for firehose service client.\n\n[Open documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html)\n\nUsage::\n\n ```python\n import boto3\n from mypy_boto3_firehose import FirehoseClient\n\n client: FirehoseClient = boto3.client(\"firehose\")\n ```\n\"\"\"\nfrom typing import Any, Dict, List, Type\n\nfrom botocore.client import BaseClient, ClientMeta\n\nfrom .literals import DeliveryStreamTypeType\nfrom .type_defs import (\n AmazonOpenSearchServerlessDestinationConfigurationTypeDef,\n AmazonOpenSearchServerlessDestinationUpdateTypeDef,\n AmazonopensearchserviceDestinationConfigurationTypeDef,\n AmazonopensearchserviceDestinationUpdateTypeDef,\n CreateDeliveryStreamOutputTypeDef,\n DeliveryStreamEncryptionConfigurationInputTypeDef,\n DescribeDeliveryStreamOutputTypeDef,\n ElasticsearchDestinationConfigurationTypeDef,\n ElasticsearchDestinationUpdateTypeDef,\n ExtendedS3DestinationConfigurationTypeDef,\n ExtendedS3DestinationUpdateTypeDef,\n HttpEndpointDestinationConfigurationTypeDef,\n HttpEndpointDestinationUpdateTypeDef,\n KinesisStreamSourceConfigurationTypeDef,\n ListDeliveryStreamsOutputTypeDef,\n ListTagsForDeliveryStreamOutputTypeDef,\n PutRecordBatchOutputTypeDef,\n PutRecordOutputTypeDef,\n RecordTypeDef,\n RedshiftDestinationConfigurationTypeDef,\n RedshiftDestinationUpdateTypeDef,\n S3DestinationConfigurationTypeDef,\n S3DestinationUpdateTypeDef,\n SplunkDestinationConfigurationTypeDef,\n SplunkDestinationUpdateTypeDef,\n TagTypeDef,\n)\n\n__all__ = (\"FirehoseClient\",)\n\nclass BotocoreClientError(BaseException):\n MSG_TEMPLATE: str\n\n def __init__(self, error_response: Dict[str, Any], operation_name: str) -> None:\n self.response: Dict[str, Any]\n self.operation_name: str\n\nclass Exceptions:\n ClientError: Type[BotocoreClientError]\n ConcurrentModificationException: Type[BotocoreClientError]\n InvalidArgumentException: Type[BotocoreClientError]\n InvalidKMSResourceException: Type[BotocoreClientError]\n LimitExceededException: Type[BotocoreClientError]\n ResourceInUseException: Type[BotocoreClientError]\n ResourceNotFoundException: 
Type[BotocoreClientError]\n ServiceUnavailableException: Type[BotocoreClientError]\n\nclass FirehoseClient(BaseClient):\n \"\"\"\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html)\n \"\"\"\n\n meta: ClientMeta\n\n @property\n def exceptions(self) -> Exceptions:\n \"\"\"\n FirehoseClient exceptions.\n \"\"\"\n def can_paginate(self, operation_name: str) -> bool:\n \"\"\"\n Check if an operation can be paginated.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.can_paginate)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#can_paginate)\n \"\"\"\n def close(self) -> None:\n \"\"\"\n Closes underlying endpoint connections.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.close)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#close)\n \"\"\"\n def create_delivery_stream(\n self,\n *,\n DeliveryStreamName: str,\n DeliveryStreamType: DeliveryStreamTypeType = None,\n KinesisStreamSourceConfiguration: \"KinesisStreamSourceConfigurationTypeDef\" = None,\n DeliveryStreamEncryptionConfigurationInput: \"DeliveryStreamEncryptionConfigurationInputTypeDef\" = None,\n S3DestinationConfiguration: \"S3DestinationConfigurationTypeDef\" = None,\n ExtendedS3DestinationConfiguration: \"ExtendedS3DestinationConfigurationTypeDef\" = None,\n RedshiftDestinationConfiguration: \"RedshiftDestinationConfigurationTypeDef\" = None,\n ElasticsearchDestinationConfiguration: \"ElasticsearchDestinationConfigurationTypeDef\" = None,\n AmazonopensearchserviceDestinationConfiguration: \"AmazonopensearchserviceDestinationConfigurationTypeDef\" = None,\n SplunkDestinationConfiguration: \"SplunkDestinationConfigurationTypeDef\" = None,\n HttpEndpointDestinationConfiguration: \"HttpEndpointDestinationConfigurationTypeDef\" = None,\n Tags: List[\"TagTypeDef\"] = None,\n AmazonOpenSearchServerlessDestinationConfiguration: \"AmazonOpenSearchServerlessDestinationConfigurationTypeDef\" = None\n ) -> CreateDeliveryStreamOutputTypeDef:\n \"\"\"\n Creates a Kinesis Data Firehose delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.create_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#create_delivery_stream)\n \"\"\"\n def delete_delivery_stream(\n self, *, DeliveryStreamName: str, AllowForceDelete: bool = None\n ) -> Dict[str, Any]:\n \"\"\"\n Deletes a delivery stream and its data.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.delete_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#delete_delivery_stream)\n \"\"\"\n def describe_delivery_stream(\n self, *, DeliveryStreamName: str, Limit: int = None, ExclusiveStartDestinationId: str = None\n ) -> DescribeDeliveryStreamOutputTypeDef:\n \"\"\"\n Describes the specified delivery stream and its status.\n\n [Show boto3 
documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.describe_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#describe_delivery_stream)\n \"\"\"\n def generate_presigned_url(\n self,\n ClientMethod: str,\n Params: Dict[str, Any] = None,\n ExpiresIn: int = 3600,\n HttpMethod: str = None,\n ) -> str:\n \"\"\"\n Generate a presigned url given a client, its method, and arguments.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.generate_presigned_url)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#generate_presigned_url)\n \"\"\"\n def list_delivery_streams(\n self,\n *,\n Limit: int = None,\n DeliveryStreamType: DeliveryStreamTypeType = None,\n ExclusiveStartDeliveryStreamName: str = None\n ) -> ListDeliveryStreamsOutputTypeDef:\n \"\"\"\n Lists your delivery streams in alphabetical order of their names.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.list_delivery_streams)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#list_delivery_streams)\n \"\"\"\n def list_tags_for_delivery_stream(\n self, *, DeliveryStreamName: str, ExclusiveStartTagKey: str = None, Limit: int = None\n ) -> ListTagsForDeliveryStreamOutputTypeDef:\n \"\"\"\n Lists the tags for the specified delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.list_tags_for_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#list_tags_for_delivery_stream)\n \"\"\"\n def put_record(\n self, *, DeliveryStreamName: str, Record: \"RecordTypeDef\"\n ) -> PutRecordOutputTypeDef:\n \"\"\"\n Writes a single data record into an Amazon Kinesis Data Firehose delivery\n stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.put_record)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#put_record)\n \"\"\"\n def put_record_batch(\n self, *, DeliveryStreamName: str, Records: List[\"RecordTypeDef\"]\n ) -> PutRecordBatchOutputTypeDef:\n \"\"\"\n Writes multiple data records into a delivery stream in a single call, which can\n achieve higher throughput per producer than when writing single records.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.put_record_batch)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#put_record_batch)\n \"\"\"\n def start_delivery_stream_encryption(\n self,\n *,\n DeliveryStreamName: str,\n DeliveryStreamEncryptionConfigurationInput: \"DeliveryStreamEncryptionConfigurationInputTypeDef\" = None\n ) -> Dict[str, Any]:\n \"\"\"\n Enables server-side encryption (SSE) for the delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.start_delivery_stream_encryption)\n [Show boto3-stubs 
documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#start_delivery_stream_encryption)\n \"\"\"\n def stop_delivery_stream_encryption(self, *, DeliveryStreamName: str) -> Dict[str, Any]:\n \"\"\"\n Disables server-side encryption (SSE) for the delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.stop_delivery_stream_encryption)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#stop_delivery_stream_encryption)\n \"\"\"\n def tag_delivery_stream(\n self, *, DeliveryStreamName: str, Tags: List[\"TagTypeDef\"]\n ) -> Dict[str, Any]:\n \"\"\"\n Adds or updates tags for the specified delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.tag_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#tag_delivery_stream)\n \"\"\"\n def untag_delivery_stream(\n self, *, DeliveryStreamName: str, TagKeys: List[str]\n ) -> Dict[str, Any]:\n \"\"\"\n Removes tags from the specified delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.untag_delivery_stream)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#untag_delivery_stream)\n \"\"\"\n def update_destination(\n self,\n *,\n DeliveryStreamName: str,\n CurrentDeliveryStreamVersionId: str,\n DestinationId: str,\n S3DestinationUpdate: \"S3DestinationUpdateTypeDef\" = None,\n ExtendedS3DestinationUpdate: \"ExtendedS3DestinationUpdateTypeDef\" = None,\n RedshiftDestinationUpdate: \"RedshiftDestinationUpdateTypeDef\" = None,\n ElasticsearchDestinationUpdate: \"ElasticsearchDestinationUpdateTypeDef\" = None,\n AmazonopensearchserviceDestinationUpdate: \"AmazonopensearchserviceDestinationUpdateTypeDef\" = None,\n SplunkDestinationUpdate: \"SplunkDestinationUpdateTypeDef\" = None,\n HttpEndpointDestinationUpdate: \"HttpEndpointDestinationUpdateTypeDef\" = None,\n AmazonOpenSearchServerlessDestinationUpdate: \"AmazonOpenSearchServerlessDestinationUpdateTypeDef\" = None\n ) -> Dict[str, Any]:\n \"\"\"\n Updates the specified destination of the specified delivery stream.\n\n [Show boto3 documentation](https://boto3.amazonaws.com/v1/documentation/api/1.26.45/reference/services/firehose.html#Firehose.Client.update_destination)\n [Show boto3-stubs documentation](https://vemel.github.io/boto3_stubs_docs/mypy_boto3_firehose/client.html#update_destination)\n \"\"\"\n", "sub_path": "typings/mypy_boto3_firehose/client.pyi", "file_name": "client.pyi", "file_ext": "pyi", "file_size_in_byte": 12616, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.Dict", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 54, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 55, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 60, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 62, "usage_type": "name"}, 
{"api_name": "typing.Type", "line_number": 63, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 64, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 65, "usage_type": "name"}, {"api_name": "typing.Type", "line_number": 66, "usage_type": "name"}, {"api_name": "botocore.client.BaseClient", "line_number": 68, "usage_type": "name"}, {"api_name": "botocore.client.ClientMeta", "line_number": 74, "usage_type": "name"}, {"api_name": "literals.DeliveryStreamTypeType", "line_number": 99, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 109, "usage_type": "name"}, {"api_name": "type_defs.CreateDeliveryStreamOutputTypeDef", "line_number": 111, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 120, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 120, "usage_type": "name"}, {"api_name": "type_defs.DescribeDeliveryStreamOutputTypeDef", "line_number": 129, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 139, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 139, "usage_type": "name"}, {"api_name": "literals.DeliveryStreamTypeType", "line_number": 153, "usage_type": "name"}, {"api_name": "type_defs.ListDeliveryStreamsOutputTypeDef", "line_number": 155, "usage_type": "name"}, {"api_name": "type_defs.ListTagsForDeliveryStreamOutputTypeDef", "line_number": 164, "usage_type": "name"}, {"api_name": "type_defs.PutRecordOutputTypeDef", "line_number": 173, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 182, "usage_type": "name"}, {"api_name": "type_defs.PutRecordBatchOutputTypeDef", "line_number": 183, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 196, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 203, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 203, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 211, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 212, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 220, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 221, "usage_type": "name"}, {"api_name": "typing.Dict", "line_number": 242, "usage_type": "name"}, {"api_name": "typing.Any", "line_number": 242, "usage_type": "name"}]} +{"seq_id": "95810818", "text": "import logging\nimport boto3\nfrom decouple import config\nfrom botocore.exceptions import ClientError\n\nAWS_STORAGE_BUCKET_NAME = config(\"AWS_STORAGE_BUCKET_NAME\")\n\n\ndef upload_to_s3(file_object, file_name):\n \"\"\"Upload a file to an S3 bucket\n\n :param file_name: File to upload\n :param bucket: Bucket to upload to\n :param object_name: S3 object name. 
\n    :return: True if file was uploaded, else False\n    \"\"\"\n\n    s3_client = boto3.client('s3')\n    try:\n        s3_client.upload_fileobj(\n            Fileobj=file_object,\n            Bucket=AWS_STORAGE_BUCKET_NAME,\n            Key=file_name\n        )\n    except ClientError as e:\n        logging.error(e)\n        return False\n    return True\n", "sub_path": "y_tinkering/reddit/upload_to_s3.py", "file_name": "upload_to_s3.py", "file_ext": "py", "file_size_in_byte": 741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "decouple.config", "line_number": 6, "usage_type": "call"}, {"api_name": "boto3.client", "line_number": 18, "usage_type": "call"}, {"api_name": "botocore.exceptions.ClientError", "line_number": 25, "usage_type": "name"}, {"api_name": "logging.error", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "357257545", "text": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch\nimport torch.utils.data as data\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\n\nimport os\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom PIL import Image\nimport numpy.random as random\n# if sys.version_info[0] == 2:\n#     import cPickle as pickle\n# else:\n#     import pickle\n\n\ndef get_img(img_dir, seq, imsize, bbox=None, transform=None):\n    imgs = []\n    for i in seq:\n        img_path = img_dir + i +\".jpg\"\n        img = Image.open(img_path).convert('RGB')\n        width, height = img.size\n        if bbox is not None:\n            r = int(np.maximum(bbox[2], bbox[3]) * 0.75)\n            center_x = int((2 * bbox[0] + bbox[2]) / 2)\n            center_y = int((2 * bbox[1] + bbox[3]) / 2)\n            y1 = np.maximum(0, center_y - r)\n            y2 = np.minimum(height, center_y + r)\n            x1 = np.maximum(0, center_x - r)\n            x2 = np.minimum(width, center_x + r)\n            img = img.crop([x1, y1, x2, y2])\n\n        if transform is not None:\n            img = transform(img)\n        imgs.append(img)\n    imgs = torch.stack(imgs, 0)\n\n    return imgs\n\n\nclass FaceDataset(data.Dataset):\n    def __init__(self, data_dir, base_size=64,\n                 transform=None, target_transform=None):\n        self.transform = transform\n        self.imsize = base_size\n        self.word, self.filenames, self.img_names = self.load_filenames(data_dir)\n\n    def load_filenames(self, data_dir):\n        self.dir = data_dir\n        word = os.listdir(data_dir)\n\n        filenames = []\n        img_names = {}\n        self.word_idx = {}\n        n = 0\n        for w in word:\n            self.word_idx[w] = n\n            n += 1\n            img_dir = os.listdir(data_dir+w)\n            img_dir.sort()\n            wlen = len(w)\n            for i in img_dir:\n                ID = w + \"/\" + i[:wlen+7]\n                seq = i[wlen+7:wlen+9]\n                try:\n                    img_names[ID].append(seq)\n                except KeyError:\n                    filenames.append(ID)\n                    img_names[ID] = [w, seq]\n        print(\"load %s ID file from %s\" %(len(filenames), data_dir))\n\n\n        return word, filenames, img_names\n\n    def __getitem__(self, index):\n        #\n        file = self.filenames[index]\n        ID = file[-6:-1]\n        word = self.img_names[file][0]\n        seq = self.img_names[file][1:]\n        seq_len = len(seq)\n\n        imgs = get_img(self.dir+file, seq, self.imsize, \n                       transform=self.transform)\n\n        return imgs, word, ID, seq\n\n\n    def __len__(self):\n        return len(self.filenames)\n", "sub_path": "mi-flow/dataset.py", "file_name": "dataset.py", "file_ext": "py", "file_size_in_byte": 2741, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "PIL.Image.open", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 27, "usage_type": 
"name"}, {"api_name": "numpy.maximum", "line_number": 30, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 35, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 36, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 42, "usage_type": "call"}, {"api_name": "torch.utils.data.Dataset", "line_number": 47, "usage_type": "attribute"}, {"api_name": "torch.utils.data", "line_number": 47, "usage_type": "name"}, {"api_name": "os.listdir", "line_number": 56, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 65, "usage_type": "call"}]} +{"seq_id": "70782614", "text": "# module created to demonstrate j integra integration\n# Feb 23 2018\n\nimport requests\nimport json\nimport pprint\nimport hashlib\nimport random\nimport string\nimport os\n\nbaseUrl = 'http://integraledger.azure-api.net/api-clone/'\n\ndefault_value = 'edeff0a38aea9ddedd09sfddddbdj8fc8d9gdff3cfdcdfddssdddsdscf3s07e4dd4ca75scd7d39fa70b30'\ndefault_id = '6452ecf9-5f6d-6b5e-9d75-cad09080a8eb'\n\n\ndef hashInIndex(dir, file):\n path = \"./data/\" + dir + \"/\" + dir + \"_index.txt\"\n file_path = \"./data/\" + dir + \"/\" + file + \".txt\"\n index_file = open(path, 'r')\n theIndex = index_file.read()\n # print (\"theIndex: \" + theIndex)\n theFileHash = hashFile(file_path)\n file_in_index = (theFileHash in theIndex)\n if file_in_index:\n indexRegistered = hashFile(path)\n res = valueexists(indexRegistered)\n return res\n else:\n return \"False\"\n\n\ndef generate_random(N):\n ran = ''.join(random.choices(string.ascii_uppercase + string.digits, k=N))\n return ran\n\n\ndef createRandomDirectory(theBasePath):\n theRan = generate_random(10)\n path = \"./\" + theBasePath + theRan\n\n try:\n os.mkdir(path)\n return theRan\n except OSError:\n print(\"Creation of the directory %s failed\" % path)\n else:\n print(\"Successfully created the directory %s \" % path)\n\n\ndef createRandomFile(theBasePath):\n theRan = generate_random(10)\n thePath = theBasePath + theRan + \".txt\"\n f = open(thePath, \"w+\")\n f.write(theRan)\n f.close()\n return thePath\n\n\ndef hashSingleFile(theFile):\n hasher = hashlib.sha3_256()\n with open(theFile, 'rb') as afile:\n buf = afile.read()\n hasher.update(buf)\n return hasher.hexdigest()\n\n\ndef hashFile(theFile):\n hasher = hashlib.sha3_256()\n with open(\"./\" + theFile, 'rb') as afile:\n buf = afile.read()\n hasher.update(buf)\n return hasher.hexdigest()\n\n\ndef valueexists(theID):\n if (theID == \"\"):\n theID = default_value\n url = baseUrl + 'valueexists/' + theID\n req = requests.get(url)\n proData = req.json()\n exists = proData['exists']\n if exists:\n theData = proData['data'][0]\n theRecord = theData['Record']\n theId = theRecord['identityId']\n return theId\n else:\n return \"False\"\n\n\ndef registerIdentity(theValue):\n headers = {'content-type': 'application/json'}\n url = baseUrl + 'registerIdentity'\n\n data = {\"identityType\": \"com.integraledger.lmatid\", \"metaData\": \"md\", \"value\": theValue}\n params = {}\n # {'sessionKey': '9ebbd0b25760557393a43064a92bae539d962103', 'format': 'xml', 'platformId': 1}\n\n req = requests.post(url, params=params, data=json.dumps(data), headers=headers)\n proData = req.json()\n return proData\n\n\ndef identityExists(theID):\n if (theID == \"\"):\n theID = default_id\n url = baseUrl + 'identityexists/' + theID\n req = requests.get(url)\n proData = 
req.json()\n    exists = proData['exists']\n    if exists:\n        theData = proData['data']\n        # theRecord = theData['Record']\n        theId = theData['identityId']\n        return theData\n    else:\n        return \"False\"\n\n\ndef menu():\n    print(\" \")\n    print(\" \")\n    print(\"************MAIN MENU**************\")\n    # time.sleep(1)\n    print()\n    pp = pprint.PrettyPrinter(indent=4)\n    choice = input(\"\"\"\n    1: identityExists\n    2: valueExists\n    3: hash file\n    4: registerIdentity\n    5: registerIdentity for file\n    6: generate random file and register hash\n    7: generate random directory and index/register\n    8: valueExistsOfRegistered\n\n    Please enter your choice:\n\n    \"\"\")\n\n    if choice == \"1\":\n        theVal = input(\"What is the identityId? \")\n        res = identityExists(theVal)\n        pp.pprint(res)\n        print(\"\")\n    elif choice == \"2\":\n        theVal = input(\"What is the value? \")\n        res = valueexists(theVal)\n        pp.pprint(res)\n        print(\"\")\n    elif choice == \"3\":\n        theVal = input(\"What is the file to hash? \")\n        res = hashFile(\"data/\" + theVal)  # hash the file instead of just echoing its path\n        pp.pprint(res)\n        print(\"\")\n    elif choice == \"4\":\n        theVal = input(\"What is the value to register? \")\n        res = registerIdentity(theVal)\n        pp.pprint(res)\n        print(\"\")\n    elif choice == \"5\":\n        theVal = input(\"What is the file to register? \")\n        theHash = hashSingleFile(\"data/\" + theVal)\n        res = registerIdentity(theHash)\n        pp.pprint(res)\n        print(\"\")\n    elif choice == \"6\":\n        # theVal = input(\"What is the file to register? \")\n        theFileName = createRandomFile(\"data/\")\n        res = hashFile(theFileName)\n        theRes = registerIdentity(res)\n        pp.pprint(theRes)\n        print(\"\")\n    elif choice == \"7\":\n        # theVal = input(\"What is the file to register? 
\")\n theBasePath = \"./data/\"\n theDir = createRandomDirectory(theBasePath)\n hashArray = []\n for i in range(1, 10):\n theFileName = createRandomFile(\"data/\" + theDir + \"/\")\n theFileHash = hashFile(theFileName)\n hashArray.append(theFileHash)\n str = '|'.join([x for x in hashArray])\n thePath = \"./data/\" + theDir + \"/\" + theDir + \"_index.txt\"\n f = open(thePath, \"w+\")\n f.write(str)\n f.close()\n indexHash = hashFile(thePath)\n res = registerIdentity(indexHash)\n pp.pprint(res)\n elif choice == \"8\" or choice == \"8\":\n theDir = input(\"What is the index \")\n if (theDir == \"\"):\n theDir = 'Y4WU694AZA'\n\n theFile = input(\"What is the file \")\n if (theFile == \"\"):\n theFile = 'GVFV93XIKD'\n\n isInIndex = hashInIndex(theDir, theFile)\n pp.pprint(isInIndex)\n\n else:\n\n print(\"You must only select one of the choices\")\n print(\"Please try again\")\n print(\" \")\n print(\" \")\n menu()\n\n\nwhile 1 == 1:\n menu()\n", "sub_path": "bchain demo.py", "file_name": "bchain demo.py", "file_ext": "py", "file_size_in_byte": 6154, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "random.choices", "line_number": 35, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 35, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 35, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 44, "usage_type": "call"}, {"api_name": "hashlib.sha3_256", "line_number": 62, "usage_type": "call"}, {"api_name": "hashlib.sha3_256", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 81, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 101, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 101, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 110, "usage_type": "call"}, {"api_name": "pprint.PrettyPrinter", "line_number": 128, "usage_type": "call"}]} +{"seq_id": "652206827", "text": "import unittest\nfrom json import dumps, loads\nfrom copy import deepcopy\n\nfrom module.enum import FieldType\nfrom module.table import Body, Row, Cell, Head\nfrom module.enum import FieldType\n\n\nclass TestBodyClass(unittest.TestCase):\n def test_set_header_object(self):\n row = Row()\n obj = Body(name=\"Table1\").set_header(row)\n\n self.assertEqual(obj.head, row)\n\n def test_set_header_lmabda(self):\n obj = Body(name=\"Table1\").set_header(lambda r: r)\n\n self.assertIsNotNone(obj.head)\n self.assertIsInstance(obj.head, Row)\n\n def test_add_row_object(self):\n obj = Body(name=\"Table1\").add_row(Row()).add_row(Row())\n\n self.assertEqual(len(obj.rows), 2)\n self.assertIsInstance(obj.rows[0], Row)\n\n def test_add_row_lambda(self):\n obj = Body(name=\"Table1\").add_row(lambda r: r).add_row(lambda r: r)\n\n self.assertEqual(len(obj.rows), 2)\n self.assertIsInstance(obj.rows[0], Row)\n\n def test_add_rows_none(self):\n obj = Body(name=\"Table1\").add_rows(None, lambda v, r: r)\n\n self.assertEqual(len(obj.rows), 0)\n\n def test_add_rows_list(self):\n lst = [dict(a=1, b=2), dict(a=2, b=1)]\n obj = Body(name=\"Table1\").add_rows(\n lst,\n lambda v, r: r\n )\n\n self.assertEqual(len(obj.rows), 2)\n self.assertIsInstance(obj.rows[0], Row)\n\n def test_to_dict_returns_none_when_rows_are_empty(self):\n obj = Body(name=\"Table1\").set_header(Row())\n self.assertEqual(obj.to_dict(), None)\n\n def test_to_dict_returns_valid_dictionary(self):\n some_list = [\n dict(\n SomeShit=\"SomeShit1\",\n 
Nullable=\"Nullable1\",\n OtherShit=1\n ),\n dict(\n SomeShit=\"SomeShit2\",\n Nullable=\"Nullable2\",\n OtherShit=2\n ),\n dict(\n SomeShit=\"SomeShit2\",\n Nullable=\"Nullable2\",\n OtherShit=2\n )\n ]\n\n obj = Body() \\\n .set_properties(\n name=\"Table1\",\n displayName=\"Таблица 1\"\n ) \\\n .set_header(\n lambda th: th\n .add_cells([\n Head(name=\"Head1\", displayName=\"Header 1\"),\n Head(name=\"Head2\", displayName=\"Header 2\"),\n Head(name=\"Head3\", displayName=\"Header 3\")\n ])\n ) \\\n .add_rows(\n some_list,\n lambda list_element, row: row.add_cells([\n Cell(\n name=\"Head1\",\n type=FieldType.Integer,\n value=list_element['SomeShit']\n ),\n Cell(\n name=\"Head2\",\n type=FieldType.String,\n value=list_element['Nullable']\n ),\n Cell(\n name=\"Head3\",\n type=FieldType.String,\n value=list_element['OtherShit']\n )\n ])\n ) \\\n .to_dict()\n\n # print(dumps(obj, ensure_ascii=False))\n expected = \"{\\\"fn\\\": \\\"Table1\\\", \\\"ft\\\": \\\"Table\\\", \\\"fv\\\": {\\\"th\\\": {\\\"0\\\": {\\\"fn\\\": \\\"Head1\\\", \\\"fv\\\": \\\"Header 1\\\"}, \\\"1\\\": {\\\"fn\\\": \\\"Head2\\\", \\\"fv\\\": \\\"Header 2\\\"}, \\\"2\\\": {\\\"fn\\\": \\\"Head3\\\", \\\"fv\\\": \\\"Header 3\\\"}}, \\\"tb\\\": {\\\"0\\\": {\\\"0\\\": {\\\"fn\\\": \\\"Head1\\\", \\\"ft\\\": \\\"Integer\\\", \\\"fv\\\": \\\"SomeShit1\\\"}, \\\"1\\\": {\\\"fn\\\": \\\"Head2\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": \\\"Nullable1\\\"}, \\\"2\\\": {\\\"fn\\\": \\\"Head3\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": 1}}, \\\"1\\\": {\\\"0\\\": {\\\"fn\\\": \\\"Head1\\\", \\\"ft\\\": \\\"Integer\\\", \\\"fv\\\": \\\"SomeShit2\\\"}, \\\"1\\\": {\\\"fn\\\": \\\"Head2\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": \\\"Nullable2\\\"}, \\\"2\\\": {\\\"fn\\\": \\\"Head3\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": 2}}, \\\"2\\\": {\\\"0\\\": {\\\"fn\\\": \\\"Head1\\\", \\\"ft\\\": \\\"Integer\\\", \\\"fv\\\": \\\"SomeShit2\\\"}, \\\"1\\\": {\\\"fn\\\": \\\"Head2\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": \\\"Nullable2\\\"}, \\\"2\\\": {\\\"fn\\\": \\\"Head3\\\", \\\"ft\\\": \\\"String\\\", \\\"fv\\\": 2}}}}, \\\"fdn\\\": \\\"Таблица 1\\\"}\"\n self.assertEqual(dumps(obj, ensure_ascii=False), expected)\n\n def test_compare_pass(self):\n obj1 = Body(name=\"Table1\").set_header(lambda r: r)\n obj2 = Body(name=\"Table1\").set_header(lambda r: r)\n\n obj1_before = deepcopy(obj1)\n\n obj1.compare(obj2, 200, 100)\n self.assertEqual(obj1.to_dict(), obj1_before.to_dict())\n\n def test_validate_empty(self):\n errors = Body().validate(\"parent\")\n self.assertEqual(len(errors), 2)\n\n def test_validate_head_missing(self):\n errors = Body(name=\"table\").validate(\"parent\")\n self.assertEqual(len(errors), 1)\n\n def test_validate_row_size_mismatch(self):\n errors = Body(name=\"table\") \\\n .set_header(lambda f: f.add_cells([\n Head(name=\"h1\", displayName=\"Head 1\"),\n Head(name=\"h2\", displayName=\"Head 2\")\n ])) \\\n .add_row(lambda r: r.add_cell(Cell(\n name=\"h1\",\n type=FieldType.Date,\n value=1\n ))) \\\n .validate(\"parent\")\n\n self.assertEqual(len(errors), 1)\n\n def test_validate_deep_error(self):\n errors = Body(name=\"table\") \\\n .set_header(lambda f: f.add_cells([\n Head(name=\"h1\", displayName=\"Head 1\"),\n Head(name=\"h2\", displayName=\"Head 2\")\n ])) \\\n .add_row(lambda r: r.add_cells([\n Cell(\n name=\"h1\",\n type=FieldType.Date,\n value=1\n ),\n Cell(\n name=\"h2\",\n value=2\n )\n ])) \\\n .validate(\"parent\")\n\n self.assertEqual(len(errors), 1)\n\n def test_from_dict(self):\n bd1_dict = 
Body(name=\"table\", displayName=\"Table 1\") \\\n .set_header(lambda f: f.add_cells([\n Head(name=\"h1\", displayName=\"Head 1\"),\n Head(name=\"h2\", displayName=\"Head 2\")\n ])) \\\n .add_row(lambda r: r.add_cells([\n Cell(\n name=\"h1\",\n type=FieldType.Date,\n value=1\n ),\n Cell(\n name=\"h2\",\n type=FieldType.Date,\n value=2\n )\n ])) \\\n .add_row(lambda r: r.add_cells([\n Cell(\n name=\"h1\",\n type=FieldType.Date,\n value=3\n ),\n Cell(\n name=\"h2\",\n type=FieldType.Date,\n value=4\n )\n ])) \\\n .to_dict()\n\n clean_dict = loads(\n dumps(bd1_dict, ensure_ascii=False), encoding='utf-8')\n bd2_dict = Body().from_dict(clean_dict).to_dict()\n\n self.assertDictEqual(bd1_dict, bd2_dict)\n\n\nif __name__ == '__main__':\n unittest.main()\n", "sub_path": "talan.rf/sharedmodel/tests/table/test_body.py", "file_name": "test_body.py", "file_ext": "py", "file_size_in_byte": 7133, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "unittest.TestCase", "line_number": 10, "usage_type": "attribute"}, {"api_name": "module.table.Row", "line_number": 12, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 13, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 18, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 21, "usage_type": "argument"}, {"api_name": "module.table.Body", "line_number": 24, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 24, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 27, "usage_type": "argument"}, {"api_name": "module.table.Body", "line_number": 30, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 33, "usage_type": "argument"}, {"api_name": "module.table.Body", "line_number": 36, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 42, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 48, "usage_type": "argument"}, {"api_name": "module.table.Body", "line_number": 51, "usage_type": "call"}, {"api_name": "module.table.Row", "line_number": 51, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 73, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 81, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 82, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 83, "usage_type": "call"}, {"api_name": "module.table.Cell", "line_number": 89, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Integer", "line_number": 91, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 91, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 94, "usage_type": "call"}, {"api_name": "module.enum.FieldType.String", "line_number": 96, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 96, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 99, "usage_type": "call"}, {"api_name": "module.enum.FieldType.String", "line_number": 101, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 101, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 110, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 113, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 114, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 116, "usage_type": "call"}, {"api_name": "module.table.Body", 
"line_number": 122, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 126, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 130, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 132, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 133, "usage_type": "call"}, {"api_name": "module.table.Cell", "line_number": 135, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 137, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 137, "usage_type": "name"}, {"api_name": "module.table.Body", "line_number": 145, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 147, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 148, "usage_type": "call"}, {"api_name": "module.table.Cell", "line_number": 151, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 153, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 153, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 156, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 166, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 168, "usage_type": "call"}, {"api_name": "module.table.Head", "line_number": 169, "usage_type": "call"}, {"api_name": "module.table.Cell", "line_number": 172, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 174, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 174, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 177, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 179, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 179, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 184, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 186, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 186, "usage_type": "name"}, {"api_name": "module.table.Cell", "line_number": 189, "usage_type": "call"}, {"api_name": "module.enum.FieldType.Date", "line_number": 191, "usage_type": "attribute"}, {"api_name": "module.enum.FieldType", "line_number": 191, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 197, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 198, "usage_type": "call"}, {"api_name": "module.table.Body", "line_number": 199, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 205, "usage_type": "call"}]} +{"seq_id": "543059203", "text": "# coding: utf-8\nfrom django.conf.urls import url\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\nfrom capital.views import ExchangeCapitalView, ProductCapitalView,\\\n VarietyCapitalView, TodayVarietyCapitalView\n\nurlpatterns = [\n url(r'^exchange$', ExchangeCapitalView.as_view()),\n url(r'^product', ProductCapitalView.as_view()),\n url(r'^variety', VarietyCapitalView.as_view()),\n url(r'^today', TodayVarietyCapitalView.as_view()),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n", "sub_path": "src/api/capital/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 512, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.url", "line_number": 9, "usage_type": "call"}, {"api_name": 
"capital.views.ExchangeCapitalView.as_view", "line_number": 9, "usage_type": "call"}, {"api_name": "capital.views.ExchangeCapitalView", "line_number": 9, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 10, "usage_type": "call"}, {"api_name": "capital.views.ProductCapitalView.as_view", "line_number": 10, "usage_type": "call"}, {"api_name": "capital.views.ProductCapitalView", "line_number": 10, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 11, "usage_type": "call"}, {"api_name": "capital.views.VarietyCapitalView.as_view", "line_number": 11, "usage_type": "call"}, {"api_name": "capital.views.VarietyCapitalView", "line_number": 11, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 12, "usage_type": "call"}, {"api_name": "capital.views.TodayVarietyCapitalView.as_view", "line_number": 12, "usage_type": "call"}, {"api_name": "capital.views.TodayVarietyCapitalView", "line_number": 12, "usage_type": "name"}, {"api_name": "rest_framework.urlpatterns.format_suffix_patterns", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "62746628", "text": "from neuron import h\nfrom matplotlib import pyplot\nh.load_file('stdrun.hoc')\n\n#Create the soma section and define the default parameters\nsoma = h.Section(name='soma')\nsoma.nseg = 1\nsoma.diam = 200\nsoma.L = 100\nsoma.cm = 1.4884e-4/6.2832e-4\n\n#Insert the Hodgkin-Huxley channels and define the conductances\nsoma.insert('hh')\nsoma.gnabar_hh = 0\nsoma.gkbar_hh = 0\nsoma.gl_hh = 2.0e-5\nsoma.el_hh = -70\n\nv_init = h.v_init= -60\n\n#Inject current in the middle of the soma\nstim = h.IClamp(0.5)\nstim.delay = 100\nstim.dur = 500\nstim.amp = 1\n\ntstop = h.tstop = 800 #ms\n\n#++++++++++++++++++++++++++++++++++++++++++++++++++++++++++#\n#Record Results \nh.dt = 0.025\n\nv0_vec = h.Vector()\nt_vec = h.Vector()\n\nv0_vec.record(soma(0.5)._ref_v)\nt_vec.record(h._ref_t)\n\ndef run_simulation(gl_hh = 2.0e-5, gna_hh = 0, gk_hh = 0, cur_inj = 1):\n # Run the simulation\n soma.gl_hh = float(gl_hh)\n soma.gnabar_hh = float(gna_hh)\n soma.gkbar_hh = float(gk_hh)\n stim.amp = float(cur_inj)\n h.run()\n\n # Plot/Visualize Results\n pyplot.figure()\n pyplot.plot(t_vec, v0_vec,'b')\n pyplot.xlim(0, tstop)\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.show()", "sub_path": "SW_Tut_1/Passive_Main.py", "file_name": "Passive_Main.py", "file_ext": "py", "file_size_in_byte": 1170, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "neuron.h.load_file", "line_number": 3, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 3, "usage_type": "name"}, {"api_name": "neuron.h.Section", "line_number": 6, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 6, "usage_type": "name"}, {"api_name": "neuron.h.v_init", "line_number": 19, "usage_type": "attribute"}, {"api_name": "neuron.h", "line_number": 19, "usage_type": "name"}, {"api_name": "neuron.h.IClamp", "line_number": 22, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 22, "usage_type": "name"}, {"api_name": "neuron.h.tstop", "line_number": 27, "usage_type": "attribute"}, {"api_name": "neuron.h", "line_number": 27, "usage_type": "name"}, {"api_name": "neuron.h.dt", "line_number": 31, "usage_type": "attribute"}, {"api_name": "neuron.h", "line_number": 31, "usage_type": "name"}, {"api_name": "neuron.h.Vector", "line_number": 33, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 33, "usage_type": "name"}, 
{"api_name": "neuron.h.Vector", "line_number": 34, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 34, "usage_type": "name"}, {"api_name": "neuron.h._ref_t", "line_number": 37, "usage_type": "attribute"}, {"api_name": "neuron.h", "line_number": 37, "usage_type": "name"}, {"api_name": "neuron.h.run", "line_number": 45, "usage_type": "call"}, {"api_name": "neuron.h", "line_number": 45, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 48, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 48, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 49, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 49, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlim", "line_number": 50, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 50, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 51, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 51, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 52, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 52, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 54, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 54, "usage_type": "name"}]} +{"seq_id": "163113351", "text": "from django.urls import path, re_path\n\nfrom . import views\n\nurlpatterns = [\n path(\"\", views.index, name=\"index\"),\n path(\"all_lists\", views.all_lists, name=\"all_lists\"),\n path(\"login\", views.login_view, name=\"login\"),\n path(\"logout\", views.logout_view, name=\"logout\"),\n path(\"register\", views.register, name=\"register\"),\n path(\"category/\", views.category, name='category'),\n path(\"/put_in\", views.put_in, name=\"put_in\"),\n path(\"/my_purchases\", views.my_purchases, name=\"my_purchases\"),\n path(\"/my_items\", views.my_items, name=\"my_items\"),\n path(\"/wish_list\", views.show_wishes, name=\"show_wishes\"),\n path(\"/\", views.show_prod, name=\"show_prod\"),\n path(\"//finish_trade\", views.finish_trade, name=\"finish_trade\"),\n path(\"//make_bid\", views.make_bid, name=\"make_bid\"),\n path(\"//add_in_list\", views.add_in_list, name=\"add_in_list\"),\n path(\"//remove_from_list\", views.remove_from_list, name=\"remove_from_list\"),\n path(\"//add_comment\", views.add_comment, name=\"add_comment\"),\n path(\"/notifi\", views.aside_notification, name=\"aside_notification\"),\n path(\"/no_notifi\", views.no_notification, name=\"no_notification\"),\n # path(\"products/\", views.show_prod, name='show_prod')\n]\n", "sub_path": "auctions/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 1515, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 11, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 14, "usage_type": 
"call"}, {"api_name": "django.urls.path", "line_number": 15, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 20, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 21, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "305741327", "text": "import sys\n\nfrom PyQt5 import QtWidgets,uic,QtCore\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtCore import pyqtSignal,QUrl\nimport numpy as np\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nimport cv2\nfrom PyQt5.QtGui import QIcon, QPixmap\nfrom matplotlib.figure import Figure\nimport matplotlib.pyplot as plt\nfrom PyQt5 import QtGui\nimport requests\nfrom PyQt5.QtWebEngineWidgets import QWebEngineSettings, QWebEngineView, QWebEnginePage\nimport url1\nimport pyautogui\nimport numpy as np\nimport random\n\n\nfrom bs4 import BeautifulSoup\nclass Web(QWebEngineView):\n\n def load(self, url):\n self.setUrl(QUrl(url))\n\n def adjustTitle(self):\n self.setWindowTitle(self.title())\n\n def disableJS(self):\n settings = QWebEngineSettings.globalSettings()\n settings.setAttribute(QWebEngineSettings.JavascriptEnabled, False)\n\ndef draw_circle(event,x,y,flags,param):\n\tif event == cv2.EVENT_LBUTTONDOWN:\n\t\timg = cv2.imread('/home/klepsydra/Python_projects/covid19/Covid-19-Tracker-master/world_map.jpg')\n\n\t\tcv2.circle(img,(x,y),10,(255,0,0),-1)\n\t\tif(x<1079 and x>1068 and y<358 and y>348):\n f=open('m.txt','w')\n f.write('China')\n print('China')\n f.close()\n pyautogui.press('esc')\n\t\telif(x<1001 and x>991 and y<423 and y>415):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('India')\n\t\t\tprint('India')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\t\t\n\t\telif(x<264 and x>254 and y<323 and y>313):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('USA')\n\t\t\tprint('USA')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x<1208 and x>1196 and y<494 and y>483):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Indonesia')\n\t\t\tprint('Indonesia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x<959 and x>946 and y<382 and y>369):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Paskistan')\n\t\t\tprint('Pakistan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\t\t\n\t\telif(x<491 and x>479 and y<536 and y>524):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Brazil')\n\t\t\tprint('Brazil')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>681 and x<696 and y>442 and y<455):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Nigeria')\n\t\t\tprint('Nigeria')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\t\t\n\t\telif(x>1042 and x<1057 and y>172 and y<187):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Russia')\n\t\t\tprint('Russia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>286 and x<301 and y>398 and y<411):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Mexico')\n\t\t\tprint('Mexico')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1180 and x<1194 and y>370 
and y<384):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Japan')\n\t\t\tprint('Japan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>834 and x<849 and y>486 and y<501):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Ethiopiia')\n\t\t\tprint('Ethiopia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>726 and x<742 and y>377 and y<391):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Egypt')\n\t\t\tprint('Egypt')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>742 and x<757 and y>512 and y<525):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('DR Congo')\n\t\t\tprint('Dr Congo')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>782 and x<796 and y>333 and y<349):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Turkey')\n\t\t\tprint('Turkey')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>866 and x<883 and y>329 and y<345):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Iran')\n\t\t\tprint('Iran')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>699 and x<714 and y>238 and y<250):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Germany')\n\t\t\tprint('Germany')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>648 and x<661 and y>228 and y<241):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('UK')\n\t\t\tprint('UK')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>665 and x<678 and y>266 and y<280):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('France')\n\t\t\tprint('France')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>729 and x<745 and y>288 and y<302):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Italy')\n\t\t\tprint('Italy')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>750 and x<764 and y>483 and y<499):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Tanzania')\n\t\t\tprint('Tanzania')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>774 and x<787 and y>635 and y<649):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('South Africa')\n\t\t\tprint('South Africa')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>824 and x<839 and y>532 and y<548):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Kenya')\n\t\t\tprint('Kenya')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1151 and x<1168 and y>397 and y<412):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('South Korea')\n\t\t\tprint('South Korea')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>357 and x<389 and y>480 and y<493):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Colombia')\n\t\t\tprint('Colombia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>606 and x<622 and y>282 and y<297):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Spain')\n\t\t\tprint('Spain')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>426 and x<441 and y>646 and y<661):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Argentina')\n\t\t\tprint('Argentina')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>652 and x<670 and y>362 and y<377):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Algeria')\n\t\t\tprint('Algeria')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>810 and x<825 and y>448 and y<463):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Sudan')\n\t\t\tprint('Sudan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>722 and x<734 and y>251 and y<264):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Ukraine')\n\t\t\tprint('Ukraine')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>832 and x<847 and y>359 and 
y<373):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Iraq')\n\t\t\tprint('Iraq')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>922 and x<935 and y>331 and y<344):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Afghanisthan')\n\t\t\tprint('Afghanisthan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>728 and x<743 and y>208 and y<222):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Poland')\n\t\t\tprint('Poland')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>237 and x<252 and y>192 and y<210):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Canada')\n\t\t\tprint('Canada')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>679 and x<694 and y>312 and y<326):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Morocco')\n\t\t\tprint('Morocco')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>861 and x<877 and y>411 and y<425):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Saudi Arabia')\n\t\t\tprint('Saudi Arabia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>912 and x<929 and y>281 and y<296):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Uzbekistan')\n\t\t\tprint('Uzbekistan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>448 and x<463 and y>462 and y<481):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Peru')\n\t\t\tprint('Peru')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>720 and x<736 and y>552 and y<566):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Angola')\n\t\t\tprint('Angola')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>865 and x<880 and y>432 and y<446):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Yemen')\n\t\t\tprint('Yemen')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\t\t\n\t\telif(x>324 and x<342 and y>510 and y<528):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Venezuela')\n\t\t\tprint('Venezuela')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>862 and x<876 and y>560 and y<575):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Madagascar')\n\t\t\tprint('Madagascar')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1174 and x<1190 and y>574 and y<594):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Australia')\n\t\t\tprint('Australia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>675 and x<690 and y>414 and y<429):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Niger')\n\t\t\tprint('Niger')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1008 and x<1026 and y>458 and y<474):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Sri Lanka')\n\t\t\tprint('Sri Lanka')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>627 and x<644 and y>413 and y<428):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Mali')\n\t\t\tprint('Mali')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>785 and x<828 and y>255 and y<264):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Romania')\n\t\t\tprint('Romania')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>408 and x<424 and y>610 and y<623):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Chile')\n\t\t\tprint('Chile')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>915 and x<930 and y>247 and y<266):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Kazakhstan')\n\t\t\tprint('Kazakhstan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>774 and x<788 and y>565 and y<581):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Zambia')\n\t\t\tprint('Zambia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>794 and x<810 and y>373 and 
y<386):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Syria')\n\t\t\tprint('Syria')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>777 and x<791 and y>437 and y<452):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Chad')\n\t\t\tprint('Chad')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>823 and x<837 and y>508 and y<522):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Somalia')\n\t\t\tprint('Somalia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>662 and x<677 and y>335 and y<348):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Tunisia')\n\t\t\tprint('Tunisia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>417 and x<434 and y>574 and y<591):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Bolivia')\n\t\t\tprint('Bolivia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>801 and x<818 and y>288 and y<306):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Greece')\n\t\t\tprint('Greece')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\t\t\n\t\telif(x>603 and x<621 and y>309 and y<327):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Portugal')\n\t\t\tprint('Portugal')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>677 and x<693 and y>180 and y<196):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Belarus')\n\t\t\tprint('Belarus')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>771 and x<811 and y>230 and y<238):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Austria')\n\t\t\tprint('Austria')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>900 and x<914 and y>305 and y<322):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Turkmenistan')\n\t\t\tprint('Turkmenistan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>750 and x<764 and y>402 and y<418):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Libya')\n\t\t\tprint('Libya')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>688 and x<702 and y>221 and y<234):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Denmark')\n\t\t\tprint('Denmark')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>788 and x<805 and y>138 and y<151):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Finland')\n\t\t\tprint('Finland')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>722 and x<738 and y>134 and y<148):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Norway')\n\t\t\tprint('Norway')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>891 and x<908 and y>449 and y<465):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Oman')\n\t\t\tprint('Oman')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>593 and x<611 and y>244 and y<260):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Ireland')\n\t\t\tprint('Ireland')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1090 and x<1104 and y>282 and y<298):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Mongolia')\n\t\t\tprint('Mongolia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>743 and x<758 and y>591 and y<607):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Namibia')\n\t\t\tprint('Namibia')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1279 and x<1295 and y>676 and y<691):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Newzealand')\n\t\t\tprint('Newzealand')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>771 and x<787 and y>616 and y<631):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Botswana')\n\t\t\tprint('Botswana')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>615 and x<633 and y>113 and
y<132):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Iceland')\n\t\t\tprint('Iceland')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>511 and x<528 and y>54 and y<71):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Greenland')\n\t\t\tprint('Greenland')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>714 and x<730 and y>103 and y<117):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Sweden')\n\t\t\tprint('Sweden')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>1066 and x<1080 and y>420 and y<433):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Thailand')\n\t\t\tprint('Thailand')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>993 and x<1005 and y>306 and y<322):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Kyrgyzstan')\n\t\t\tprint('Kyrgyzstan')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\n\t\telif(x>121 and x<139 and y>108 and y<126):\n\t\t\tf=open('m.txt','w')\n\t\t\tf.write('Alaska')\n\t\t\tprint('Alaska')\n\t\t\tf.close()\n\t\t\tpyautogui.press('esc')\n\t\telse:\n\t\t\tprint(\"not valid\")\nclass MplCanvas(FigureCanvasQTAgg):\n\tdef __init__(self, parent=None, width=5, height=4, dpi=100):\n\t\tf, ax = plt.subplots(figsize=(18,5)) \n\t\tplt.bar([1,2,3], [global1.confirmed,global1.death,global1.recovered],label='', color=(1,0.274509804,0.466666667,1))\n\t\tf.set_facecolor(color=(0.156862745,0.129411765,0.250980392,0.588235294))\n\t\tax.set_facecolor(color=(0.156862745,0.129411765,0.250980392,0.588235294))\n\t\tax.tick_params(axis='x', colors=(0.301960784,0.301960784,0.392156863,1))\n\t\tax.tick_params(axis='y', colors=(0.301960784,0.301960784,0.392156863,1))\n\n\t\n\t\tax.legend(fontsize = 14)\n\t\tax.set_xticklabels(('','','confirmed Cases','','Deaths','','Recovered Cases'))\n\t\tsuper(MplCanvas,self).__init__(f)\nclass dash_board(QtWidgets.QMainWindow):\n\tsig2g = QtCore.pyqtSignal()\n\tsig2c = QtCore.pyqtSignal()\n\tsig2ck = QtCore.pyqtSignal()\n\tsig2i = QtCore.pyqtSignal()\n\tdef __init__(self):\n\t\tsuper(dash_board,self).__init__()\n\t\tuic.loadUi(\"dash.ui\",self)\n\t\tStyleSheet = '''\n\t\tQPushButton{\n\t\tborder-width: 2px;\n\t\tborder-radius:6px;\n\t\tfont: 57 16pt \"Ubuntu\";\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(235, 137, 39, 255));\n\n\t\t}\n\t\tQPushButton:hover {\n \tbackground-color: #64b5f6;\n \tcolor: #fff;\n\t\t}\n\t\t'''\n\t\tStyleSheet2 = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255,255,255);\n\t\tfont: 75 20pt \"Ubuntu\";\n\n\t\t}\n\t\tQPushButton:hover {\n \tcolor: rgb(0,255,0);\n\t\tfont: 75 20pt \"Ubuntu\";\n\n\t\t}\n\t\t'''\n\t\tStyleSheet3 = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255,255,255);\n\t\tfont: 30pt \"Webdings\";\n\n\n\t\t}\n\t\tQPushButton:hover {\n\t\tcolor: rgb(0,255,0);\n\t\tfont: 30pt \"Webdings\";\n\t\t}\n\t\t'''\n\t\tStyleSheet4 = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255,255,255);\n\t\tfont: 30pt \"Webdings\";\n\n\n\t\t}\n\t\tQPushButton:hover {\n\t\tcolor: rgb(255,0,0);\n\t\tfont: 30pt \"Webdings\";\n\t\t}\n\t\t'''\n\t\tStyleSheet5 = '''\n\t\tQLabel{\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(0, 0, 0, 0));\n\t\tfont: 75 10pt \"Ubuntu\";\n\t\tcolor:rgb(255, 255, 255);\n\n\t\t}\n\t\tQLabel:hover {\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(0, 0, 0, 0));\n\t\tfont: 75 10pt \"Ubuntu\";\n\t\tcolor: rgb(150, 150, 150);\n\t\t}\n\t\t'''\n\t\tStyleSheet6 = '''\n\t\tQTextBrowser{\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(0, 0, 0, 
0)););\n\n\t\t}\n\t\tQLabel:hover {\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(0, 0, 0, 0));\n\t\tfont: 75 10pt \"Ubuntu\";\n\t\tcolor: rgb(150, 150, 150);\n\t\t}\n\t\t'''\n\n\t\tself.p1.setStyleSheet(StyleSheet)\n\t\tself.p2.setStyleSheet(StyleSheet)\n\t\tself.p3.setStyleSheet(StyleSheet)\n\t\tself.p4.setStyleSheet(StyleSheet)\n\t\tself.p11.setStyleSheet(StyleSheet2)\n\t\tself.p22.setStyleSheet(StyleSheet3)\n\t\tself.p33.setStyleSheet(StyleSheet3)\n\t\tself.p44.setStyleSheet(StyleSheet3)\n\t\tself.quit.setStyleSheet(StyleSheet4)\n\n\t\tself.l1.setStyleSheet(StyleSheet5)\n\t\tself.l2.setStyleSheet(StyleSheet5)\n\t\tself.l3.setStyleSheet(StyleSheet5)\n\t\tself.l4.setStyleSheet(StyleSheet5)\n\t\tself.l5.setStyleSheet(StyleSheet5)\n\t\tself.l6.setStyleSheet(StyleSheet5)\n\t\tself.l7.setStyleSheet(StyleSheet5)\n\t\tself.l8.setStyleSheet(StyleSheet5)\n\t\tself.l9.setStyleSheet(StyleSheet5)\n\t\tself.l10.setStyleSheet(StyleSheet5)\n\t\tself.l11.setStyleSheet(StyleSheet5)\n\t\tself.l12.setStyleSheet(StyleSheet5)\n\t\t\n\t\tself.p1.clicked.connect(self.toglob)\n\t\tself.p11.clicked.connect(self.toglob)\n\t\tself.p2.clicked.connect(self.tocnt)\n\t\tself.p22.clicked.connect(self.tocnt)\n\t\tself.p3.clicked.connect(self.tocheck)\n\t\tself.p33.clicked.connect(self.tocheck)\n\t\tself.p4.clicked.connect(self.toinf)\n\t\tself.p44.clicked.connect(self.toinf)\n\t\tself.quit.clicked.connect(self.quit1)\n\t\tself.show()\n\tdef quit1(self):\n\t\tsys.exit()\n\tdef toinf(self):\n\t\tself.sig2i.emit()\n\tdef toglob(self):\n\t\tself.sig2g.emit()\n\tdef tocnt(self):\n\t\tself.sig2c.emit()\n\tdef tocheck(self):\n\t\tself.sig2ck.emit()\nimport global1\nclass main_window(QtWidgets.QMainWindow):\n\n\tsigc=QtCore.pyqtSignal()\n\tsigi = QtCore.pyqtSignal()\n\tsigcc = QtCore.pyqtSignal()\n\tmap =QtCore.pyqtSignal()\n\tdef __init__(self):\n\t\tsuper(main_window,self).__init__()\n\t\tuic.loadUi('main_window1.ui',self)\n\t\tStyleSheet = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255, 255, 255);\n\t\tborder-width: 2px;\n\t\tborder-radius:15px;\n\t\tfont: 75 15pt \"Ubuntu\";\n\t\tbackground-color: qlineargradient(spread:pad, x1:0, y1:0, x2:1, y2:0, stop:1 rgba(77, 70, 100, 255));\n\n\t\t}\n\t\tQPushButton:hover {\n \tbackground-color: #64b5f6;\n \tcolor: #fff;\n\t\t}\n\t\t'''\n\t\tStyleSheet2 = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255,255,255);\n\t\tfont: 75 20pt \"Ubuntu\";\n\n\t\t}\n\t\tQPushButton:hover {\n \tcolor: rgb(0,255,0);\n\t\tfont: 75 20pt \"Ubuntu\";\n\n\t\t}\n\t\t'''\n\t\tStyleSheet3 = '''\n\t\tQPushButton{\n\t\tcolor: rgb(255,255,255);\n\t\tfont: 30pt \"Webdings\";\n\n\n\t\t}\n\t\tQPushButton:hover {\n\t\tcolor: rgb(0,255,0);\n\t\tfont: 30pt \"Webdings\";\n\t\t}\n\t\t'''\n\t\tStyleSheet4 = '''\n\t\tQLabel{\n\t\tfont: 57 26pt \"Ubuntu\";\n\t\tcolor: rgb(150, 22, 41);;\n\n\n\t\t}\n\t\tQLabel:hover {\n\t\tfont: 57 37pt \"Ubuntu\";\n\t\tcolor: rgb(150, 150, 41);;\n\n\t\t}\n\n\n\n\t\t'''\n\t\tself.cc.setStyleSheet(StyleSheet)\n\t\tself.cp.setStyleSheet(StyleSheet)\n\t\tself.cp_2.setStyleSheet(StyleSheet)\n\t\tself.hmb.setStyleSheet(StyleSheet2)\n\t\tself.bt.setStyleSheet(StyleSheet3)\n\t\tself.inn.setStyleSheet(StyleSheet3)\n\t\tself.cn.setStyleSheet(StyleSheet3)\n\t\tself.confirmed.setStyleSheet(StyleSheet4)\n\t\tself.death.setStyleSheet(StyleSheet4)\n\t\tself.recovered.setStyleSheet(StyleSheet4)\n\t\t\n\n\n\t\tlayout = QtWidgets.QVBoxLayout(self.widget)\n\n\n\t\tif(url1.k==0):\n\t\t\tself.confirmed.setText(\"NETWORK ERROR\")\n\t\t\tself.death.setText(str(\"NETWORK 
ERROR\"))\n\t\t\tself.recovered.setText(str(\"NETWORK ERROR\"))\n\t\telse:\n\t\t\tself.confirmed.setText(str(global1.confirmed))\n\t\t\tself.death.setText(str(global1.death))\n\t\t\tself.recovered.setText(str(global1.recovered))\n\t\t\tself.cc.clicked.connect(self.tocc)\n\t\t\tsc = MplCanvas(self)\n \n\t\t\tlayout.addWidget(sc)\n\t\t\tself.cp.clicked.connect(self.tocountry)\n\t\t\t#self.inb.clicked.connect(self.toinfo)\n\t\t\tself.show()\n\n\n\n\n\tdef tocc(self):\n\n\t\tself.sigcc.emit()\n\tdef toinfo(self):\n\t\tself.sigi.emit()\n\n\tdef tocountry(self):\n\t\tself.sigc.emit()\n\t\tself.map.emit()\n\nclass c_window(QtWidgets.QMainWindow):\n\tdef __init__(self):\n\t\tsuper(c_window,self).__init__()\n\t\tuic.loadUi('c_window1.ui',self)\n\t\tself.figure = plt.figure()\n\t\tself.canvas = FigureCanvas(self.figure)\n\t\tself.toolbar = NavigationToolbar(self.canvas, self)\n\t\tlayout1 =QtWidgets.QVBoxLayout(self.wid1)\n\t\tlayout1.addWidget(self.canvas)\n\t\tlayout1.addWidget(self.toolbar)\n\t\tself.mp.clicked.connect(self.tomp)\n\t\t\n\n\n\t\t#f = open(\"country.txt\",\"rb\")\n\t\t#for line in f:\n\t\t#\tcstr=str(line.strip())\n\t\t#\tself.cb.addItem(str(cstr[2:-1]))\n\t\t#f.close()\n\t\t\n\n\t\tself.show()\n\n\n\tdef tomp(self):\n\t\timg = cv2.imread('/home/klepsydra/Python_projects/covid19/Covid-19-Tracker-master/world_map.jpg')\n\t\tcv2.namedWindow('select_country', cv2.WINDOW_NORMAL)\n\t\tcv2.setMouseCallback('select_country',draw_circle)\n\t\tcv2.setWindowProperty ('select_country', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\t\twhile(1):\n\t\t\tcv2.imshow('select_country',img)\n\t\t\tk = cv2.waitKey(20) & 0xFF\n\t\t\tif k == 27:\n\t\t\t\tbreak\n\t\tcv2.destroyAllWindows()\n\t\tQtWidgets.QApplication.setOverrideCursor(Qt.WaitCursor)\n\t\tf= open(\"m.txt\",\"r\")\n\t\tcnt=str(f.read())\n\t\tprint(cnt)\n\t\tf.close()\n\t\tself.tag.setText(cnt)\n\t\tcountry=cnt\n \t\n\t\turl = r'https://www.worldometers.info/coronavirus/'\n\t\tresource = requests.get(url).text\n\t\tsouped_resource = BeautifulSoup(resource, \"html.parser\")\n\t\tall_tr = souped_resource.find_all('tr')\n\t\tflag = 0\n\t\tcount = 0\n\t\trdata = []\n\t\tfor tr in all_tr:\n\t\t\tall_td = tr.find_all('td')\n\t\t\tfor td in all_td:\n\t\t\t\tif flag == 1:\n\t\t\t\t\trdata.append(td.string)\n\t\t\t\t\tcount += 1\n\t\t\t\t\tif count > 10:\n\t\t\t\t\t\tbreak\n\t\t\t\ttry:\n\t\t\t\t\tif td.string.startswith(country):\n\t\t\t\t\t\tflag = 1\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\tif count > 10:\n\t\t\t\tbreak\n\t\tdatar = [str(x) for x in rdata]\n\n\n\t\t#info = requests.get(request_url)\n\t\t#dataq = info.json()\n\t\t#data = info.json()[-1]\n\t\t#for case in dataq:\n\t\t\t#x.append(case['Date'][:10])\n\t\t\t#y.append(case['Confirmed'])\n\t\t\t#z.append(case['Deaths'])\n\t\t\t#r.append(case['Recovered'])\n\t\t#coordinates = dict(lat = data['Lat'],lon = data['Lon'])\n\t\t#recovered = data['Recovered']\n\t\t#active = data['Active']\n\t\t#country_code = data['CountryCode']\n\t\t#last_updated = data['Date'][:10]\n\t\t\n\t\tself.det.setText(str(datar[2]))\n\t\tself.con.setText(str(datar[0]))\n\t\tself.reco.setText(str(datar[4]))\n\t\t#self.cc.setText(str(country_code))\n\t\t#self.ac.setText(str(active))\n\t\tself.nc.setText(str(datar[1]))\n\t\tself.nd.setText(str(datar[3]))\n\t\tself.tt.setText(str(datar[9]))\n\t\tself.cpm.setText(str(datar[7]))\n\t\tself.dpm.setText(str(datar[8]))\n\t\tself.tpm.setText(str(datar[10]))\n\n\t\turl = None\n\t\twith open('countryurl.txt') as file:\n\t\t\tfor line in file:\n\t\t\t\tif 
line.startswith(country):\n\t\t\t\t\turl = line.strip().split()[-1]\n\t\tif url is None:\n\t\t\tprint(\"Invalid Name\")\n\t\t\tquit()\n\t\ttry:\n\t\t\tinfo = requests.get('https://www.worldometers.info/coronavirus/' + url).text\n\t\texcept:\n\t\t\tprint(\"BAD Connection\")\n\n\n\n\n\t\tlines = info.split('\\n')\n\t\tfor line in lines:\n\t\t\tif line.strip().startswith('categories: ['):\n\t\t\t\tdates = line.split(',')\n\t\t\t\tbreak\n\t\tdates[0] = dates[0].strip()[13:]\n\t\tdel(dates[-1])\n\t\tdates[-1] = dates[-1][:8]\n\t\tnew_dates=[]\n\t\tfor date in dates:\n\t\t\tnew_date = ''\n\t\t\tfor c in date:\n\t\t\t\tif c.isalnum():\n\t\t\t\t\tnew_date += c\n\t\t\tnew_dates.append(new_date)\n\n\n\n\t\tflag = 0\n\t\tfor line in lines:\n\t\t\tif flag == 1:\n\t\t\t\tif line.strip().startswith(\"data:\"):\n\t\t\t\t\tconfirmed_y = line.split(',')\n\t\t\t\t\tbreak\n\t\t\tif line.strip().startswith(\"text: 'Total Coronavirus Cases'\"):\n\t\t\t\tflag = 1\n\t\tconfirmed_y[0] = confirmed_y[0].strip()[7:]\n\t\tdel(confirmed_y[-1])\n\n\t\tnew_y=[]\n\t\tfor value in confirmed_y:\n\t\t\tnew_value = ''\n\t\t\tfor c in value:\n\t\t\t\tif c.isalnum():\n\t\t\t\t\tnew_value += c\n\t\t\tnew_y.append(new_value)\n\n\t\tconfirmed_y = [int(x) for x in new_y]\n\t\tprint(confirmed_y)\n\n\t\tflag = 0\n\t\tfor line in lines:\n\t\t\tif flag == 1:\n\t\t\t\tif line.strip().startswith(\"data:\"):\n\t\t\t\t\tdeath_y = line.split(',')\n\t\t\t\t\tbreak\n\t\t\tif line.strip().startswith(\"text: 'Total Coronavirus Deaths'\"):\n\t\t\t\tflag = 1\n\t\tdeath_y[0] = death_y[0].strip()[7:]\n\t\tdel(death_y[-1])\n\n\t\tnew_death_y=[]\n\t\tfor value in death_y:\n\t\t\tnew_value = ''\n\t\t\tfor c in value:\n\t\t\t\tif c.isalnum():\n\t\t\t\t\tnew_value += c\n\t\t\tnew_death_y.append(new_value)\n\n\t\tdeath_y = [int(x) for x in new_death_y]\n\t\tself.figure.clear()\n\t\tax = self.figure.add_subplot(111)\n\t\t#datam = y\n\t\t#data1 = x\n\t\t#self.figure.clear()\n\t\t#ax = self.figure.add_subplot(111)\n\t\t#ax1=self.figure.add_subplot()\n\t#\tax2=self.figure.add_subplot()\n\t\tplt.xlabel(\"Date\")\n\t\tplt.ylabel(\"Number of people\")\n\t\tfor tick in ax.xaxis.get_major_ticks():\n\t\t\ttick.label.set_rotation('vertical')\n\t\t\ttick.label.set_fontsize(5)\n\t\tax.tick_params(axis='x', colors='white')\n\t\tax.tick_params(axis='y', colors='white')\n\t\tax.xaxis.label.set_color('white')\n\t\tax.xaxis.label.set_color('white')\n\t#\tax1.plot(x,z,'*-',label=\"Deaths\")\n\t\tax.plot(new_dates,death_y,label=\"Death Toll\")\n\t\tax.plot(new_dates,confirmed_y,label=\"Confirmed Cases\")\n\t\tax.set_facecolor(color=(0.101960784,0.101960784,0.109803922,1))\n\t\tself.figure.set_facecolor(color=(0.101960784,0.101960784,0.109803922,1))\n\t#\tax2.plot(x,r,'*-',label=\"Recovered cases\")\n\t#\tax2.legend()\n\t\tax.legend()\n\t#\tax1.legend()\n\n\n\t\tself.canvas.draw()\n\t\tQtWidgets.QApplication.restoreOverrideCursor()\n\n\n\t\t\t\nclass info(QtWidgets.QMainWindow):\n\tbackh=QtCore.pyqtSignal()\n\tdef __init__(self):\n\t\tsuper(info,self).__init__()\n\t\tuic.loadUi(\"info1.ui\",self)\n\n\t\tself.bh.clicked.connect(self.back)\n\t\tself.layout3 =QtWidgets.QVBoxLayout(self.wid5)\n\t\tself.web = 
Web()\n\t\tself.layout3.addWidget(self.web)\n\t\tself.web.load(\"https://meenakshi2604.github.io/Covid-tracker/\")\n\t\tself.count=0\n\t\tself.web.show()\n\t\tself.n.clicked.connect(self.next)\n\t\tself.n_2.clicked.connect(self.back1)\n\t\tself.ur=[\"https://meenakshi2604.github.io/Covid-tracker/\",\"https://www.youtube.com/embed/BtN-goy9VOY\",\"https://www.who.int/\",\"https://github.com/Niranjanprof/Covid-19-Tracker\"]\n\t\tself.show()\n\t\tur2 = [\"WEBSITE\",\"AWARNESS VIDEO\",\"WHO WEBSITE\",\"GITHUB PAGE\"]\n\tdef change(self):\n\t\tif(self.ind== 0):\n\t\t\tself.l2.setText(\"WEBSITE\")\n\t\t\tself.l3.setText(\"AWARNESS VIDEO\")\n\t\t\tself.l4.setText(\"WHO WEBSITE\")\n\t\t\tself.l1.setText(\"GITHUB PAGE\")\n\t\tif(self.ind== 1):\n\t\t\tself.l2.setText(\"AWARNESS VIDEO\")\n\t\t\tself.l3.setText(\"WHO WEBSITE\")\n\t\t\tself.l4.setText(\"GITHUB PAGE\")\n\t\t\tself.l1.setText(\"WEBSITE\")\n\t\tif(self.ind == 2):\n\t\t\tself.l2.setText(\"WHO WEBSITE\")\n\t\t\tself.l3.setText(\"GITHUB PAGE\")\n\t\t\tself.l4.setText(\"WEBSITE\")\n\t\t\tself.l1.setText(\"AWARNESS VIDEO\")\n\t\tif(self.ind ==3):\n\t\t\tself.l2.setText(\"GITHUB PAGE\")\n\t\t\tself.l3.setText(\"WEBSITE\")\n\t\t\tself.l4.setText(\"AWARNESS VIDEO\")\n\t\t\tself.l1.setText(\"WHO WEBSITE\")\n\tdef back1(self):\n\t\tif(self.count == 0):\n\t\t\tself.count = 4\n\t\tself.ind = self.count-1\t\n\t\tself.web.load(self.ur[self.ind])\n\t\tself.change()\n\t\tself.count -= 1\n\n\tdef next(self):\n\t\tif(self.count == 3):\n\t\t\tself.count = -1\n\t\tself.ind = self.count+1\n\t\tself.web.load(self.ur[self.ind])\n\t\tself.change()\n\t\tself.count += 1\n\n\n\t\n\n\tdef back(self):\n\t\tself.backh.emit()\nclass c_checker(QtWidgets.QMainWindow):\n\tdef __init__(self):\n\t\tsuper(c_checker,self).__init__()\n\t\tuic.loadUi(\"c_check.ui\",self)\n\t\tself.show()\n\t\tlayout = QtWidgets.QVBoxLayout(self.widget_5)\n\t\tweb = Web()\n\t\tlayout.addWidget(web)\n\t\tweb.load(\"https://evgeny-nadymov.github.io/telegram-react/\")\n\t\t\nclass controller():\n\tdef __init__(self):\n\t\tpass\n\tdef dash(self):\n\t\tself.dwin = dash_board()\n\t\tself.dwin.show()\n\t\tself.dwin.sig2g.connect(self.show_main_window)\n\t\tself.dwin.sig2c.connect(self.toc)\n\t\tself.dwin.sig2ck.connect(self.checker)\n\t\tself.dwin.sig2i.connect(self.infop)\n\t\tself.dwin.setWindowTitle(\"Welcome To Viscot\")\n\n\n\tdef show_main_window(self):\n\t\tself.win1 = main_window()\n\t\tself.win1.setWindowTitle(\"COVID TRACKER\")\n\t\tself.win1.show()\n\t\tself.dwin.hide()\n\t\tself.win1.sigi.connect(self.infop)\n\t\tself.win1.sigc.connect(self.mapper)\n\t\tself.win1.sigcc.connect(self.checker)\n\t\tself.win1.map.connect(self.mapper)\n\tdef toc(self):\n\t\tself.c_win=c_window()\n\t\ttry:\n\t\t\tself.dwin.hide()\n\t\texcept:\n\t\t\tself.win1.hide()\n\t\tself.c_win.setWindowTitle(\"Country_Wise Results\")\n\t\tself.c_win.show()\n\t\tprint\n\n\tdef mapper(self):\n\t\t\n\t\tself.c_win.tomp()\n\tdef checker(self):\n\t\tself.checkw = c_checker()\n\t\tself.checkw.show()\n\t\ttry:\n\t\t\tself.win1.hide()\n\t\texcept:\n\t\t\tself.dwin.hide()\n\t\tself.checkw.setWindowTitle(\"Viscot-bot\")\n\tdef infop(self):\n\t\tself.win2=info()\n\t\tself.win2.show()\n\t\tself.win2.setWindowTitle(\"INFO PAGE\")\n\t\tself.win2.backh.connect(self.backhm)\n\t\ttry:\n\t\t\tself.win1.hide()\n\t\texcept:\n\t\t\tself.dwin.hide()\n\t\t\n\tdef backhm(self):\n\t\tself.win2.close()\n\t\t\n\t\tself.win1.show()\n\tdef toc(self):\n\t\tself.c_win=c_window()\n\t\tself.c_win.setWindowTitle(\"Country_Wise 
Results\")\n\t\tself.c_win.show()\n\t\tprint\n\napp= QtWidgets.QApplication(sys.argv)\nctr = controller()\nctr.dash()\nsys.exit(app.exec_())\n\n", "sub_path": "NAES/Anes.py", "file_name": "Anes.py", "file_ext": "py", "file_size_in_byte": 27258, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineView", "line_number": 24, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.QUrl", "line_number": 27, "usage_type": "call"}, {"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineSettings.globalSettings", "line_number": 33, "usage_type": "call"}, {"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineSettings", "line_number": 33, "usage_type": "name"}, {"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineSettings.JavascriptEnabled", "line_number": 34, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWebEngineWidgets.QWebEngineSettings", "line_number": 34, "usage_type": "name"}, {"api_name": "cv2.EVENT_LBUTTONDOWN", "line_number": 37, "usage_type": "attribute"}, {"api_name": "cv2.imread", "line_number": 38, "usage_type": "call"}, {"api_name": "cv2.circle", "line_number": 40, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 46, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 52, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 59, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 66, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 73, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 80, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 87, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 94, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 101, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 108, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 115, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 122, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 129, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 136, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 143, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 150, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 157, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 164, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 171, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 178, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 185, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 192, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 199, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 206, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 213, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 220, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 227, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 234, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 241, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 248, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 255, "usage_type": "call"}, {"api_name": "pyautogui.press", 
"line_number": 262, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 269, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 276, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 283, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 290, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 297, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 304, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 311, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 318, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 325, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 332, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 339, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 346, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 353, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 360, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 367, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 374, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 381, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 388, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 395, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 402, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 409, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 416, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 423, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 430, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 437, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 444, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 451, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 458, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 465, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 472, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 479, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 486, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 493, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 500, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 507, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 514, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 521, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 528, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 535, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 542, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 549, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 556, "usage_type": "call"}, {"api_name": "pyautogui.press", "line_number": 563, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 566, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 568, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 568, "usage_type": "name"}, 
{"api_name": "matplotlib.pyplot.bar", "line_number": 569, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 569, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 579, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 579, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 580, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 580, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 581, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 581, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 582, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 582, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 583, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 583, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 586, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 586, "usage_type": "name"}, {"api_name": "sys.exit", "line_number": 695, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 705, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 705, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 707, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 707, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 708, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 708, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 709, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 709, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 710, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 710, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 713, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 713, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 781, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 781, "usage_type": "name"}, {"api_name": "url1.k", "line_number": 784, "usage_type": "attribute"}, {"api_name": "global1.confirmed", "line_number": 789, "usage_type": "attribute"}, {"api_name": "global1.death", "line_number": 790, "usage_type": "attribute"}, {"api_name": "global1.recovered", "line_number": 791, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 813, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 813, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 816, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 816, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 817, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 817, "usage_type": "name"}, {"api_name": "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "line_number": 818, "usage_type": "call"}, {"api_name": "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "line_number": 819, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 820, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 820, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 838, "usage_type": "call"}, {"api_name": "cv2.namedWindow", "line_number": 839, 
"usage_type": "call"}, {"api_name": "cv2.WINDOW_NORMAL", "line_number": 839, "usage_type": "attribute"}, {"api_name": "cv2.setMouseCallback", "line_number": 840, "usage_type": "call"}, {"api_name": "cv2.setWindowProperty", "line_number": 841, "usage_type": "call"}, {"api_name": "cv2.WND_PROP_FULLSCREEN", "line_number": 841, "usage_type": "attribute"}, {"api_name": "cv2.WINDOW_FULLSCREEN", "line_number": 841, "usage_type": "attribute"}, {"api_name": "cv2.imshow", "line_number": 843, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 844, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 847, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication.setOverrideCursor", "line_number": 848, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 848, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 848, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.Qt.WaitCursor", "line_number": 848, "usage_type": "attribute"}, {"api_name": "PyQt5.QtCore.Qt", "line_number": 848, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 857, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 858, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 916, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 991, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 991, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 992, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 992, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication.restoreOverrideCursor", "line_number": 1012, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1012, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1012, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 1016, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1016, "usage_type": "name"}, {"api_name": "PyQt5.QtCore.pyqtSignal", "line_number": 1017, "usage_type": "call"}, {"api_name": "PyQt5.QtCore", "line_number": 1017, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 1020, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 1020, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 1023, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1023, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QMainWindow", "line_number": 1076, "usage_type": "attribute"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1076, "usage_type": "name"}, {"api_name": "PyQt5.uic.loadUi", "line_number": 1079, "usage_type": "call"}, {"api_name": "PyQt5.uic", "line_number": 1079, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QVBoxLayout", "line_number": 1081, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1081, "usage_type": "name"}, {"api_name": "PyQt5.QtWidgets.QApplication", "line_number": 1149, "usage_type": "call"}, {"api_name": "PyQt5.QtWidgets", "line_number": 1149, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 1149, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 1152, "usage_type": "call"}]} +{"seq_id": "226864788", "text": "\"\"\"\nDefinition of urls for Djangular.\n\"\"\"\n\n\n# Uncomment the next lines to enable the admin:\n\nfrom django.conf.urls import url, include\nfrom django.contrib 
import admin\nfrom django.views.generic import TemplateView\n\nfrom rest_framework_jwt.views import refresh_jwt_token, verify_jwt_token\n\n# from rest_framework_jwt.views import obtain_jwt_token\n#from django.views.decorators.csrf import ensure_csrf_cookie #use this on the login page later to protect it from csrf attacks\n\n #admin.autodiscover()\n\nurlpatterns = [\n # Examples:\n\n url(r'^admin/', admin.site.urls),\n url(r'^$', TemplateView.as_view(template_name=\"scrumboard/home.html\")),\n\n url(r'^scrumboard/', include('scrumboard.urls')),\n url(r'^profile/', include('profiles.urls')),\n url(r'^lesson/', include('lessons.urls')),\n url(r'^docs/', include('rest_framework_docs.urls')),\n # url(r'^user/', include('authentication.urls')),\n url(r'^auth/', include('rest_auth.urls')),\n url(r'^auth/refresh-token', refresh_jwt_token),\n url(r'^auth/verify-token', verify_jwt_token),\n url(r'^auth/registration', include('registration.urls')),\n # url(r'^auth/', include('rest_framework_social_oauth2.urls')),\n # url(r'^auth/', include('rest_framework_social_oauth2.urls', namespace='social')),\n\n# url(r'^api/', include('api.urls')),\n # url(r'^docs/', include('rest_framework_docs.urls')),\n \n\n\n # url(r'^auth_api/', include('auth_api.urls')),\n # url(r'^admin/', admin.site.urls),\n\n # url(r'^scrum$', scrumboard.)\n\n #url(r'^$', app.views.home, name='home'),\n #url(r'^contact$', app.views.contact, name='contact'),\n #url(r'^about', app.views.about, name='about'),\n #url(r'^login/$',\n # django.contrib.auth.views.login,\n # {\n # 'template_name': 'app/login.html',\n # 'authentication_form': app.forms.BootstrapAuthenticationForm,\n # 'extra_context':\n # {\n # 'title': 'Log in',\n # 'year': datetime.now().year,\n # }\n # },\n # name='login'),\n #url(r'^logout$',\n # django.contrib.auth.views.logout,\n # {\n # 'next_page': '/',\n # },\n # name='logout'),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n]\n", "sub_path": "Djangular/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 2426, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 22, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 22, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView.as_view", "line_number": 23, "usage_type": "call"}, {"api_name": "django.views.generic.TemplateView", "line_number": 23, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 25, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 26, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 27, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 28, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 
30, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 30, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 31, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.refresh_jwt_token", "line_number": 31, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 32, "usage_type": "call"}, {"api_name": "rest_framework_jwt.views.verify_jwt_token", "line_number": 32, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 33, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "12135432", "text": "# Copyright 2020 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\ndef get_inplanes():\n return [64, 128, 256, 512]\n\n\ndef conv3x3x3(in_planes, out_planes, stride=1):\n return nn.Conv3d(in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=1,\n bias=False)\n\n\ndef conv1x1x1(in_planes, out_planes, stride=1):\n return nn.Conv3d(in_planes,\n out_planes,\n kernel_size=1,\n stride=stride,\n bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, in_planes, planes, stride=1, downsample=None):\n super().__init__()\n\n self.conv1 = conv3x3x3(in_planes, planes, stride)\n self.bn1 = nn.BatchNorm3d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3x3(planes, planes)\n self.bn2 = nn.BatchNorm3d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, in_planes, planes, stride=1, downsample=None):\n super().__init__()\n\n self.conv1 = conv1x1x1(in_planes, planes)\n self.bn1 = nn.BatchNorm3d(planes)\n self.conv2 = conv3x3x3(planes, planes, stride)\n self.bn2 = nn.BatchNorm3d(planes)\n self.conv3 = conv1x1x1(planes, planes * self.expansion)\n self.bn3 = nn.BatchNorm3d(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n\n def __init__(self,\n block,\n layers,\n block_inplanes,\n n_input_channels=3,\n conv1_t_size=7,\n conv1_t_stride=1,\n no_max_pool=False,\n shortcut_type='B',\n widen_factor=1.0,\n n_classes=400):\n super().__init__()\n\n 
block_inplanes = [int(x * widen_factor) for x in block_inplanes]\n\n self.in_planes = block_inplanes[0]\n self.no_max_pool = no_max_pool\n\n self.conv1 = nn.Conv3d(n_input_channels,\n self.in_planes,\n kernel_size=(conv1_t_size, 7, 7),\n stride=(conv1_t_stride, 2, 2),\n padding=(conv1_t_size // 2, 3, 3),\n bias=False)\n self.bn1 = nn.BatchNorm3d(self.in_planes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool3d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, block_inplanes[0], layers[0],\n shortcut_type)\n self.layer2 = self._make_layer(block,\n block_inplanes[1],\n layers[1],\n shortcut_type,\n stride=2)\n self.layer3 = self._make_layer(block,\n block_inplanes[2],\n layers[2],\n shortcut_type,\n stride=2)\n self.layer4 = self._make_layer(block,\n block_inplanes[3],\n layers[3],\n shortcut_type,\n stride=2)\n\n self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1))\n self.fc = nn.Linear(block_inplanes[3] * block.expansion, n_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv3d):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_out',\n nonlinearity='relu')\n elif isinstance(m, nn.BatchNorm3d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n def _downsample_basic_block(self, x, planes, stride):\n out = F.avg_pool3d(x, kernel_size=1, stride=stride)\n zero_pads = torch.zeros(out.size(0), planes - out.size(1), out.size(2),\n out.size(3), out.size(4))\n if isinstance(out.data, torch.npu.FloatTensor):\n zero_pads = zero_pads.npu()\n\n out = torch.cat([out.data, zero_pads], dim=1)\n\n return out\n\n def _make_layer(self, block, planes, blocks, shortcut_type, stride=1):\n downsample = None\n if stride != 1 or self.in_planes != planes * block.expansion:\n if shortcut_type == 'A':\n downsample = partial(self._downsample_basic_block,\n planes=planes * block.expansion,\n stride=stride)\n else:\n downsample = nn.Sequential(\n conv1x1x1(self.in_planes, planes * block.expansion, stride),\n nn.BatchNorm3d(planes * block.expansion))\n\n layers = []\n layers.append(\n block(in_planes=self.in_planes,\n planes=planes,\n stride=stride,\n downsample=downsample))\n self.in_planes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.in_planes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n if not self.no_max_pool:\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n\n return x\n\n\ndef generate_model(model_depth, **kwargs):\n assert model_depth in [10, 18, 34, 50, 101, 152, 200]\n\n if model_depth == 10:\n model = ResNet(BasicBlock, [1, 1, 1, 1], get_inplanes(), **kwargs)\n elif model_depth == 18:\n model = ResNet(BasicBlock, [2, 2, 2, 2], get_inplanes(), **kwargs)\n elif model_depth == 34:\n model = ResNet(BasicBlock, [3, 4, 6, 3], get_inplanes(), **kwargs)\n elif model_depth == 50:\n model = ResNet(Bottleneck, [3, 4, 6, 3], get_inplanes(), **kwargs)\n elif model_depth == 101:\n model = ResNet(Bottleneck, [3, 4, 23, 3], get_inplanes(), **kwargs)\n elif model_depth == 152:\n model = ResNet(Bottleneck, [3, 8, 36, 3], get_inplanes(), **kwargs)\n elif model_depth == 200:\n model = ResNet(Bottleneck, [3, 24, 36, 3], get_inplanes(), **kwargs)\n\n return model\n", "sub_path": "PyTorch/built-in/cv/classification/3D_ResNet_ID0421_for_PyTorch/models/resnet.py", "file_name": "resnet.py", "file_ext": "py", 
"file_size_in_byte": 8140, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.nn.Conv3d", "line_number": 28, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 28, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 37, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 37, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 44, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 44, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 52, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 84, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 84, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 86, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 88, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 89, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 116, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 116, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 142, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 143, "usage_type": "name"}, {"api_name": "torch.nn.MaxPool3d", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "torch.nn.AdaptiveAvgPool3d", "line_number": 163, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 163, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 164, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 164, "usage_type": "name"}, {"api_name": "torch.nn.Conv3d", "line_number": 167, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 167, "usage_type": "name"}, {"api_name": "torch.nn.init.kaiming_normal_", "line_number": 168, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 168, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 168, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 171, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 171, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 172, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.init.constant_", 
"line_number": 173, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 173, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 173, "usage_type": "name"}, {"api_name": "torch.nn.functional.avg_pool3d", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.zeros", "line_number": 177, "usage_type": "call"}, {"api_name": "torch.npu", "line_number": 179, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 182, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 194, "usage_type": "name"}, {"api_name": "torch.nn.BatchNorm3d", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 196, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 208, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 208, "usage_type": "name"}]} +{"seq_id": "441821017", "text": "import re\nimport storage\nimport args\nimport requests\nimport concurrent.futures\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urlparse, urljoin\n\n\nclass NaiveCrawler:\n def __init__(self, initial_url, allowed_domains, depth, database, init=True):\n self.init = init\n self.initial_url = initial_url\n self.current_url = \"\"\n self.allowed_domains = allowed_domains\n self.depth = depth\n self.links_to_visit = set()\n self.visited_links = set()\n self.db = database\n self.recall_last_crawl()\n self.display_status_on_init()\n\n \n @staticmethod\n def is_absolute(url):\n return bool(urlparse(url).netloc)\n\n \n def recall_last_crawl(self):\n try:\n prev_state = self.db.json_load()\n if prev_state:\n self.current_url = prev_state[\"current_url\"]\n self.visited_links = set(prev_state[\"visited_links\"])\n self.links_to_visit = set(prev_state[\"links_to_visit\"])\n self.initial_url = self.current_url\n self.init = False\n else:\n pass\n except Exception as ex:\n return ex\n\n\n def display_status_on_init(self):\n print(f\"\\U0001F7E2\\tCrawler starting at:\\n{self.current_url}\\n\")\n print(f\"\\U0001F645\\tRestricted to crawl {len(self.allowed_domains)} domain(s):\\n{self.allowed_domains} for depth: {self.depth}\")\n\n\n\n def is_valid(self, candidate):\n if candidate in self.visited_links:\n return False \n \n if re.search('tel:', candidate)\\\n or re.search('mailto:', candidate)\\\n or re.search('#', candidate):\n return False\n\n # Fetch domain name (including potential subdomain)\n current_domain_name = urlparse(candidate).netloc\n # try:\n # current_subdomain = current_domain_name.split('.')[0]\n # except Exception:\n # # No subdomain\n # pass\n\n # Validate if traversal is restricted\n if current_domain_name not in self.allowed_domains:\n return False\n \n\n url_ojbect = urlparse(candidate)\n return any([url_ojbect.scheme, url_ojbect.netloc, url_ojbect.path])\n\n\n @staticmethod \n def get_relative_path(href):\n if href.startswith(\"/\"):\n return href[1:len(href)]\n return href\n\n \n def get_links(self):\n try:\n if self.init:\n self.links_to_visit.add(self.initial_url)\n self.init = False\n\n # Pop out an arbitrary element from the set\n self.current_link = self.links_to_visit.pop()\n \n current_page = requests.get(self.current_link)\n print(f\"\\n\\U0001F577\\U0001F578\\tCrawler \\U0001F440 at:\\n{self.current_link}\")\n\n 
self.visited_links.add(self.current_link)\n\n soup = BeautifulSoup(current_page.content, 'html.parser')\n \n return soup.find_all('a')\n\n except Exception:\n print(\"\\U0001F6AB Invalid URL.\")\n return False\n \n\n def crawl(self):\n links = self.get_links()\n\n if links:\n for i, link in enumerate(links):\n if link is not None:\n link_href = link.get('href')\n \n if not self.is_absolute(link_href):\n relative_path = self.get_relative_path(link_href)\n parsed_linked_href = urlparse(link_href)\n scheme = parsed_linked_href.scheme\n\n current_domain_name = urlparse(self.current_link).netloc\n if not scheme: scheme = 'http'\n link_href = f\"{scheme}://{current_domain_name}/{relative_path}\"\n\n if not self.is_valid(link_href):\n continue\n self.links_to_visit.add(link_href)\n\n print(f\"Links to visit: {len(self.links_to_visit)}\")\n \n\n \n def initium(self):\n try:\n if self.init:\n threads = 1\n else:\n threads = min(32, len(self.links_to_visit)+1)\n\n for i in range(self.depth):\n # print(f'\\n\\U0001F577\\U0001F578\\tCrawler_{i}')\n with concurrent.futures.ThreadPoolExecutor(max_workers=threads) as dominus:\n dominus.submit(self.crawl())\n \n print(f\"\\U0001F534\\tCrawler stopped after crawling {len(self.visited_links)} link(s).\")\n print(f\"\\U0001F481\\tFound {len(self.links_to_visit)} page(s) to crawl.\\n\")\n\n # Save the state\n self.salvare()\n \n except Exception as ex:\n print(f\"The following error occured:\\n{ex}\")\n return\n\n\n def salvare(self):\n state = {\n \"current_url\": self.current_link,\n \"visited_links\": list(self.visited_links),\n \"links_to_visit\": list(self.links_to_visit)\n }\n \n self.db.json_save(state)\n\n", "sub_path": "naive_crawler.py", "file_name": "naive_crawler.py", "file_ext": "py", "file_size_in_byte": 5093, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "urllib.parse.urlparse", "line_number": 26, "usage_type": "call"}, {"api_name": "re.search", "line_number": 54, "usage_type": "call"}, {"api_name": "re.search", "line_number": 55, "usage_type": "call"}, {"api_name": "re.search", "line_number": 56, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 60, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 92, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 97, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 116, "usage_type": "call"}, {"api_name": "urllib.parse.urlparse", "line_number": 119, "usage_type": "call"}, {"api_name": "concurrent.futures.futures.ThreadPoolExecutor", "line_number": 140, "usage_type": "call"}, {"api_name": "concurrent.futures.futures", "line_number": 140, "usage_type": "attribute"}, {"api_name": "concurrent.futures", "line_number": 140, "usage_type": "name"}]} +{"seq_id": "12884900", "text": "#!/usr/bin/python3\n# -*- coding:utf-8 -*-\n\nimport pymysql\nimport sys\nimport datetime\nimport csv\nimport json\nimport calendar\nfrom collections import Counter\n\n\nfichero = '/home/administrator/Scripts/config/rules.csv'\nnetwork_rules = []\nhost_rules = []\ncontador_network = 0\ncontador_host = 0\n\n\n\n\ndACD = 0\ndClientesEmpleados = 0\ndPCI = 0\ndMedios = 0\ndAdjuntos = 0\n\ndef imprimirFecha(fe):\n\tif 'Mon' in fe:\n\t\treturn 'Lunes'\n\tif 'Tue' in fe:\n\t\treturn 'Martes'\n\tif 'Wed' in fe:\n\t\treturn 'Miercoles'\n\tif 'Thu' in fe:\n\t\treturn 'Jueves'\n\tif 'Fri' in 
fe:\n\t\treturn 'Viernes'\n\tif 'Sat' in fe:\n\t\treturn 'Sabado'\n\tif 'Sun' in fe:\n\t\treturn 'Domingo'\n\telse:\n\t\treturn 'Mal Dia'\nconn = None\n\n\n\nnombres = []\nelementos = []\nprimera_linea = []\nprimera_linea.append('Categories')\nnext_linea = []\n\nf = csv.reader(open(fichero, newline = \"\",encoding=\"utf-8\"))\nfor row in f:\n\t\n\tif row[0] == 'network':\n\t\tnetwork_rules.append([])\n\t\tfor i in range (1,len(row)):\n\t\t\tnetwork_rules[contador_network].append(row[i])\n\t\tcontador_network += 1\n\telif row[0] == 'host':\n\t\tnetwork_host.append([])\n\t\tfor i in range (1,len(row)):\n\t\t\thost_rules[contador_host].append(row[i])\n\t\tcontador_host += 1\n\t\n\nfor i in range(len(network_rules)):\n\tfor j in range(len(network_rules[i])):\n\t\t\tif j == 0:\n\t\t\t\tprimera_linea.append(network_rules[i][j])\n\t\t\tprint(network_rules[i][j])\n\nelementos.append(primera_linea)\n\ntry:\n\n\tconn = pymysql.connect(host='localhost',user='root',\n\t\t\t\t\t\tpasswd='q1w2e3r4t5', db='dlp')\n\n\n\tcur=conn.cursor()\n\tnow = datetime.datetime.now()\n\tdias = 7\n\tfechas = []\n\tfechas_semanal = []\n\tTotal = 0\n\n\tfor i in range(dias,0,-1):\n\t\tfechas.append((now + datetime.timedelta(days=-i)).strftime(\"%a %b %d % %Y\"))\n\t\tfechas_semanal.append((now + datetime.timedelta(days=-i)).strftime(\"%d/%m/%Y\"))\n\n\tfor i in range(len(fechas)):\n\t\tnext_linea = []\n\t\tfecha = (\"(Timestamp like '\") + fechas[i] + (\"')\")\n\t\tprint(imprimirFecha(fechas[i]))\n\t\tprint(fechas[i])\n\t\tnext_linea.append(fechas_semanal[i])\n\t\tfor x in range(len(network_rules)):\n\t\t\tfor y in range(1,len(network_rules[x])):\n\t\t\t\tbuscar = ('SELECT DISTINCT incidentid, count(Rule) FROM ndlp where rule like \"') + network_rules[x][y] +('\" and ') + fecha\n\t\t\t\tcur.execute (buscar)\n\t\t\t\tfe1 = cur.fetchone()\n\t\t\t\tTotal = Total + fe1[1]\n\t\t\tprint (network_rules[x][0], Total)\n\t\t\tnext_linea.append(Total)\n\t\t\tTotal = 0\n\t\telementos.append(next_linea)\n\t\t\t\n\t\t\n\t\t#if (i == 0): elementos.append(['State',sGenerales,sClientesEmpleados,sDocInterna,sPCI,sMedios,sAdjuntos])\n\t\t#if (i == 0): elementos.append(['Categories',sACD,sClientesEmpleados,sDocInterna,sPCI,sMedios])\n\t\t#elementos.append([imprimirFecha(fechas[i]),dGenerales,dClientesEmpleados,dDocInterna,dPCI,dMedios,dAdjuntos])\n\t\t#elementos.append([fechas_semanal[i],dACD,dClientesEmpleados,dDocInterna,dPCI,dMedios])\n\t\t#print(nombres)\n\tfor x in range(len(elementos)):\n\t\tprint(elementos[x])\n\t\n\nexcept pymysql.Error as e:\n\n\tprint (\"Error en connexión a BBDD\")\n\tsys.exit(1)\n\nfinally:\n\n\tif conn:\n\t\tconn.close()\n\nfilename = 'dsemanal.csv'\n\n\n\nwith open(filename, 'wt', encoding='utf8') as f:\n\twriter = csv.writer(f)\n\twriter.writerows(elementos)\n", "sub_path": "proyectos/graph/data/get_dsemanal.py", "file_name": "get_dsemanal.py", "file_ext": "py", "file_size_in_byte": 3080, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "csv.reader", "line_number": 55, "usage_type": "call"}, {"api_name": "pymysql.connect", "line_number": 80, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 85, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 85, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 92, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 93, "usage_type": "call"}, {"api_name": "pymysql.Error", "line_number": 122, "usage_type": 
"attribute"}, {"api_name": "sys.exit", "line_number": 125, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 137, "usage_type": "call"}]} +{"seq_id": "591888208", "text": "import psycopg2\nimport os\nimport csv\n\ndef create_connection():\n #with open(os.path.abspath(os.path.join(os.path.dirname( __file__ ), '..', 'DatabaseUI', 'pass.conf'))) as file_handler:\n # file_handler.readline()\n return psycopg2.connect(database=\"data\",user=\"ag\",host=\"138.68.81.207\",port=5432,password=os.environ['DATABASE_CRED'])\n\nclass WellDataWorker():\n WELL_COMPLETIONS_ID = 'WELLC'\n MONTHLY_WELL_PRODUCTIONS_ID = 'MONPROD'\n\n def get_all_operator_names(self):\n with create_connection() as connection:\n query = \"SELECT DISTINCT(operator_name) FROM wells_apis WHERE operator_name IS NOT NULL;\"\n cursor = connection.cursor()\n cursor.execute(query)\n operator_names = []\n for row in cursor:\n operator_names.append(row[0])\n return operator_names\n return []\n\n def get_all_counties(self):\n with create_connection() as connection:\n query = \"SELECT DISTINCT(county) FROM well_completions WHERE county IS NOT NULL;\"\n cursor = connection.cursor()\n cursor.execute(query)\n counties = []\n for row in cursor:\n counties.append(row[0])\n return counties\n return []\n\n def get_all_formations(self):\n with create_connection() as connection:\n query = \"SELECT DISTINCT(formation) FROM well_completions WHERE formation IS NOT NULL;\"\n cursor = connection.cursor()\n cursor.execute(query)\n formations = []\n for row in cursor:\n formations.append(row[0])\n return formations\n return []\n\n def get_all_api_counties(self,seq,sidetrack):\n return self.get_relative_part_from_database('api_county_code','api_seq_num','sidetrack_num',seq, sidetrack)\n\n def get_all_api_sequences(self,county,sidetrack):\n return self.get_relative_part_from_database('api_seq_num','api_county_code','sidetrack_num', county, sidetrack)\n\n def get_all_api_sidetracks(self,county,seq):\n return self.get_relative_part_from_database('sidetrack_num','api_county_code','api_seq_num',county,seq)\n\n def get_all_townships(self, range, meridian):\n return self.get_relative_part_from_database('twp','range','meridian',range,meridian, table='well_completions')\n\n def get_all_ranges(self, township, meridian):\n return self.get_relative_part_from_database('range','twp','meridian',township,meridian, table='well_completions')\n\n def get_all_meridians(self, township, range):\n return self.get_relative_part_from_database('meridian','twp','range',township,range, table='well_completions')\n\n def get_relative_part_from_database(self, main_column_name, side_1_col_name, side_2_col_name, val_1, val_2, table='wells_apis'):\n with create_connection() as connection:\n where = ''\n values = []\n if val_1:\n where += \" \" + side_1_col_name +\"=%s\"\n values.append(val_1)\n if val_2:\n if val_1:\n where += ' AND'\n where += \" \" + side_2_col_name +\"=%s\"\n values.append(val_2)\n if where:\n where = ' WHERE' + where\n query = \"SELECT DISTINCT(\" + main_column_name + \") FROM \" + table + where\n cursor = connection.cursor()\n cursor.execute(query, values)\n result = []\n for row in cursor:\n result.append(row[0])\n return result\n return []\n\n def get_rigs(self):\n return self.execute_query('SELECT * FROM rigs;',[])\n\n def update_rigs(self, rigs_to_update):\n query = ''\n params = []\n for rig_pk in rigs_to_update.keys():\n contractor = rig_pk.split('|')[0]\n number = rig_pk.split('|')[1]\n w = rigs_to_update[rig_pk]\n query += ' UPDATE rigs SET 
well_num_name=%s, well_name=%s, well_num=%s WHERE contractor=%s AND number=%s;'\n params += w + [contractor,number]\n with create_connection() as connection:\n cursor = connection.cursor()\n cursor.execute(query, params)\n connection.commit()\n \n\n def execute_query(self, query, parameters):\n with create_connection() as connection:\n result = []\n cursor = connection.cursor()\n cursor.execute(query, parameters)\n for row in cursor:\n result.append(row)\n for i in range(len(result)):\n result[i] = list(result[i])\n for j in range(len(result[i])):\n result[i][j] = result[i][j] if result[i][j] else \"\"\n return result\n\n def to_csv(self, request, id):\n if id == WellDataWorker.WELL_COMPLETIONS_ID:\n last_executed_query = request.session['last_executed_query'].replace('*','api_county_code, api_seq_num, sidetrack_num')\n last_executed_parameters = request.session['last_executed_parameters']\n query = '''SELECT * FROM well_completions as wc\n WHERE (wc.api_county_code, wc.api_seq_num, wc.sidetrack_num) IN (%s)''' % last_executed_query\n return (query,last_executed_parameters)\n if id != WellDataWorker.WELL_COMPLETIONS_ID:\n last_executed_query = request.session[id]['query']\n if last_executed_query[-1] == ';':\n last_executed_query = last_executed_query[:-1]\n params = request.session[id]['params']\n query = '''SELECT DISTINCT q.*,\n wc.* FROM (%s) as q\n LEFT JOIN well_completions as wc\n ON q.api_county_code = wc.api_county_code\n AND q.api_seq_num = wc.api_seq_num \n AND q.sidetrack_num = wc.sidetrack_num''' % last_executed_query\n return (query,params)\n \n def get_columns_of(self, table):\n query = \"\"\"SELECT column_name\n FROM information_schema.columns\n WHERE table_schema = 'public'\n AND table_name = %s\"\"\"\n return self.execute_query(query, [table])", "sub_path": "app/DatabaseWorker.py", "file_name": "DatabaseWorker.py", "file_ext": "py", "file_size_in_byte": 6235, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "psycopg2.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}]} +{"seq_id": "91532489", "text": "import os\nfrom pathlib import Path\nimport json\nimport typing\n\nimport pytest\nimport yaml\nfrom pyrsistent import pmap\n\nfrom noobit_markets.definitions import ROOT_PATH, APP_PATH\nfrom noobit_markets.base.request import *\nfrom noobit_markets.exchanges.kraken.rest.base import *\nfrom noobit_markets.exchanges.kraken.rest.public.symbols.response import *\n\n\nrel_path = \"tests/cassettes/test_symbols/test_symbols.yaml\"\nfull_path = os.path.join(ROOT_PATH, rel_path)\n\n\n@pytest.fixture\ndef load_yaml():\n with open(full_path) as y:\n rec = yaml.safe_load(y)\n return rec\n\ndef test_get_response_status_code(load_yaml):\n\n for interaction in load_yaml[\"interactions\"]:\n req = interaction[\"request\"]\n resp = interaction[\"response\"]\n assert resp[\"status_code\"] == 200\n\n\ndef test_get_error_content(load_yaml):\n\n for interaction in load_yaml[\"interactions\"]:\n req = interaction[\"request\"]\n resp = interaction[\"response\"]\n assert isinstance(get_error_content(resp), frozenset)\n #! 
we need to manually change to response[_content] in yaml file\n assert isinstance(json.loads(resp[\"_content\"])[\"error\"], list)\n assert get_error_content(resp) == frozenset(json.loads(resp[\"_content\"])[\"error\"])\n\n\ndef test_get_result_content(load_yaml):\n\n for interaction in load_yaml[\"interactions\"]:\n req = interaction[\"request\"]\n resp = interaction[\"response\"]\n returned = get_result_content(resp)\n assert isinstance(returned, typing.Mapping)\n expected = pmap(json.loads(resp[\"_content\"])[\"result\"])\n assert isinstance(expected, typing.Mapping)\n\n assert returned == expected\n\n\ndef test_validate_result_content(load_yaml):\n\n for interaction in load_yaml[\"interactions\"]:\n req = interaction[\"request\"]\n resp = interaction[\"response\"]\n result_content = pmap(json.loads(resp[\"_content\"])[\"result\"])\n filtered_result_content = filter_result_content_symbols(result_content)\n returned = validate_raw_result_content_symbols(filtered_result_content)\n assert isinstance(returned, Ok)\n assert isinstance(returned.value, KrakenResponseSymbols)", "sub_path": "src/noobit_markets/exchanges/kraken/rest/public/symbols/test/progress_test_response_symbols.py", "file_name": "progress_test_response_symbols.py", "file_ext": "py", "file_size_in_byte": 2201, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.join", "line_number": 17, "usage_type": "call"}, {"api_name": "noobit_markets.definitions.ROOT_PATH", "line_number": 17, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "yaml.safe_load", "line_number": 23, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 20, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 41, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 42, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 51, "usage_type": "attribute"}, {"api_name": "pyrsistent.pmap", "line_number": 52, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "typing.Mapping", "line_number": 53, "usage_type": "attribute"}, {"api_name": "pyrsistent.pmap", "line_number": 63, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "258301953", "text": "# -*- coding: utf-8 -*-\n##############################################################################\n#\n#\n##############################################################################\n\nfrom openerp.osv import osv, fields\nfrom openerp.tools.translate import _\nfrom openerp.report import report_sxw\nfrom datetime import datetime\nfrom datetime import date\n\nclass nstda_mnc_task (osv.osv):\n _name = \"nstda.mnc.task\"\n _description = \"MNC Task Management\"\n _columns = {\n 'mnc_task': fields.char('Task'),\n 'mnc_task_action_date': fields.date('Action date'),\n 'mnc_task_id':fields.many2one('nstda.mnc', 'MNC'),\n }\nnstda_mnc_task()\n\n\nclass nstda_mnc_file(osv.osv):\n \n \n def _get_code(self, cr, uid, ids, field, arg, context=None):\n res = {}\n name = ''\n for g in self.browse(cr, uid, ids, context):\n prefix_mnc_mane = g.mnc_file_id.name\n date = str(g.mnc_date).replace(\"-\", \".\")\n name = '%s.%s' % (prefix_mnc_mane,date)\n res[g.id] = name\n return res\n \n \n _name = \"nstda.mnc.file\"\n _description = \"MNC File Management\"\n _columns = {\n 'mnc_name' : fields.function(_get_code, type='char', 
string='Code', readonly=True, store=True),\n 'mnc_file': fields.binary('Document'),\n 'mnc_filename': fields.char('Filename', size=500),\n 'mnc_metadata':fields.char('Metadata'),\n 'mnc_file_id':fields.many2one('nstda.mnc', 'MNC'),\n 'mnc_date': fields.date('Upload Date'),\n 'create_date': fields.datetime('Upload Date', readonly=True),\n 'create_uid': fields.many2one('res.users', 'Owner', readonly=True),\n }\n _defaults = {\n 'mnc_date': fields.date.context_today, \n }\nnstda_mnc_file()\n\nclass nstda_mnc(osv.osv):\n \n \n\n\n def create(self, cr, uid, vals, context=None):\n sequence=self.pool.get('ir.sequence').get(cr, uid, 'mnc.code')\n vals['code'] = int(float(sequence))\n return super(nstda_mnc, self).create(cr, uid, vals, context=context)\n \n def _get_rec_name(self, cr, uid, ids, field, arg, context=None):\n res = {}\n name = ''\n for g in self.browse(cr, uid, ids, context):\n code = g.code\n title = g.title\n name = '%s %s' % (code,title)\n res[g.id] = name\n return res\n \n def _get_code(self, cr, uid, ids, field, arg, context=None):\n res = {}\n name = ''\n for g in self.browse(cr, uid, ids, context):\n code = g.code\n name = 'MNC.%s' % (code)\n res[g.id] = name\n return res\n \n def _have_grp(self, cr, uid, ids, field, arg, context=None):\n res = {}\n for g in self.browse(cr, uid, ids, context):\n cr.execute(\"select gid from res_groups_users_rel where uid = %s and gid in \\\n (select id from res_groups where name in ('User') and category_id in \\\n (select id from ir_module_category where name = 'NSTDA MNC'))\", (uid,))\n ids = cr.fetchone()\n group_id = ids and ids[0] or 0 \n \n if not group_id:\n raise osv.except_osv(_('Access Denied!!'),_(\"Sorry, you are not allowed to access this document.\\n\\nPlease contact the administrator\"))\n res[g.id] = True\n return res\n \n _name = \"nstda.mnc\"\n _inherit = ['mail.thread']\n _description = \"MNC\"\n _rec_name = 'rec_name_fnc'\n _columns = {\n 'rec_name_fnc': fields.function(_get_rec_name, type='char', string='leave_type'),\n 'code' : fields.integer('No', readonly=True),\n 'name' : fields.function(_get_code, type='char', string='Code', readonly=True, store=True),\n 'title' : fields.char('Title',size=200),\n 'detail' : fields.text('Detail'),\n# 'action_date': fields.date('Action date'),\n 'mnc_task_ids' : fields.one2many('nstda.mnc.task', 'mnc_task_id', 'Task(s)'), \n 'mnc_file_ids' : fields.one2many('nstda.mnc.file', 'mnc_file_id', 'Attachment(s)'), \n 'summary' : fields.text('Summary'),\n 'employee_ids': fields.many2many('nstdamas.employee', 'export_employee_rel', 'employee_ids', 'emp_id', 'employee_ids'),\n 'write_date':fields.datetime('Last Update'),\n 'state': fields.selection([\n ('open', 'Open'),\n ('close', 'Close')],\n 'Status', readonly=False),\n# 'have_grp' : fields.function(_have_grp, type='boolean', string='have_grp', readonly=True, store=False),\n }\n _defaults = {\n 'code': lambda *a: 0,\n 'state': lambda *a:'open',\n }\n _order = 'code desc'\n\nnstda_mnc()\n", "sub_path": "mnc/mnc.py", "file_name": "mnc.py", "file_ext": "py", "file_size_in_byte": 4667, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "openerp.osv.osv.osv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "openerp.osv.osv", "line_number": 13, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 17, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 17, "usage_type": "name"}, {"api_name": "openerp.osv.fields.date", 
"line_number": 18, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 18, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2one", "line_number": 19, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 19, "usage_type": "name"}, {"api_name": "openerp.osv.osv.osv", "line_number": 24, "usage_type": "attribute"}, {"api_name": "openerp.osv.osv", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 32, "usage_type": "name"}, {"api_name": "datetime.date", "line_number": 33, "usage_type": "name"}, {"api_name": "openerp.osv.fields.function", "line_number": 41, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 41, "usage_type": "name"}, {"api_name": "openerp.osv.fields.binary", "line_number": 42, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 42, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 43, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 43, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 44, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 44, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2one", "line_number": 45, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 45, "usage_type": "name"}, {"api_name": "openerp.osv.fields.date", "line_number": 46, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 46, "usage_type": "name"}, {"api_name": "openerp.osv.fields.datetime", "line_number": 47, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 47, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2one", "line_number": 48, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 48, "usage_type": "name"}, {"api_name": "openerp.osv.fields.date", "line_number": 51, "usage_type": "attribute"}, {"api_name": "openerp.osv.fields", "line_number": 51, "usage_type": "name"}, {"api_name": "openerp.osv.osv.osv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "openerp.osv.osv", "line_number": 55, "usage_type": "name"}, {"api_name": "openerp.osv.osv.except_osv", "line_number": 94, "usage_type": "call"}, {"api_name": "openerp.osv.osv", "line_number": 94, "usage_type": "name"}, {"api_name": "openerp.tools.translate._", "line_number": 94, "usage_type": "call"}, {"api_name": "openerp.osv.fields.function", "line_number": 103, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 103, "usage_type": "name"}, {"api_name": "openerp.osv.fields.integer", "line_number": 104, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 104, "usage_type": "name"}, {"api_name": "openerp.osv.fields.function", "line_number": 105, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 105, "usage_type": "name"}, {"api_name": "openerp.osv.fields.char", "line_number": 106, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 106, "usage_type": "name"}, {"api_name": "openerp.osv.fields.text", "line_number": 107, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 107, "usage_type": "name"}, {"api_name": "openerp.osv.fields.one2many", "line_number": 109, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 109, "usage_type": "name"}, {"api_name": "openerp.osv.fields.one2many", "line_number": 110, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 
110, "usage_type": "name"}, {"api_name": "openerp.osv.fields.text", "line_number": 111, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 111, "usage_type": "name"}, {"api_name": "openerp.osv.fields.many2many", "line_number": 112, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 112, "usage_type": "name"}, {"api_name": "openerp.osv.fields.datetime", "line_number": 113, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 113, "usage_type": "name"}, {"api_name": "openerp.osv.fields.selection", "line_number": 114, "usage_type": "call"}, {"api_name": "openerp.osv.fields", "line_number": 114, "usage_type": "name"}]} +{"seq_id": "379086062", "text": "import psycopg2\n\nfrom configparser import ConfigParser\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\nfrom _functions.config import config\n\n\ndef drop_database(dbname):\n\n # Configure parser for database.ini\n parser = ConfigParser()\n parser.read('database_interactions/database.ini')\n\n # If Database line exists remove it otherwise pass\n try:\n parser.remove_option('postgresql', 'database')\n with open('database.ini', 'w') as configFile:\n parser.write(configFile)\n except ValueError:\n print(ValueError)\n pass\n\n try:\n # Use config functie to get values from database.ini\n db = config()\n con = psycopg2.connect(**db)\n cursor = con.cursor()\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n drop_table_command = f\"DROP DATABASE {dbname};\"\n cursor.execute(drop_table_command)\n con.commit()\n print('Database has been dropped')\n con.close()\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n\n\ndef create_database(dbname='huwebshop'):\n \"\"\"\n Function to create a new database and extend the database.ini file.\n :param dbname:\n :return:\n \"\"\"\n\n # Configure parser for database.ini\n parser = ConfigParser()\n parser.read('database_interactions/database.ini')\n\n # If Database line exists remove it otherwise pass\n try:\n parser.remove_option('postgresql', 'database')\n with open('database.ini', 'w') as configFile:\n parser.write(configFile)\n except ValueError:\n pass\n\n try:\n # Use config functie to get values from database.ini\n db = config()\n con = psycopg2.connect(**db)\n con.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n cursor = con.cursor()\n sqlCreateDatabase = \"create database \" + dbname + \";\"\n cursor.execute(sqlCreateDatabase)\n con.commit()\n con.close()\n print('Database created')\n\n except (Exception, psycopg2.DatabaseError) as error:\n print(error)\n\n # Add database name to the database.ini file\n parser['postgresql']['database'] = dbname\n with open('database.ini', 'w') as configFile:\n parser.write(configFile)\n\n print(f'{dbname} aangevuld aan de database.ini file')\n\n\ndef fill_database(sqlfile='database_interactions/huwebshop.sql'):\n \"\"\"\n Fill the database with the database structure from a sql file.\n :param sqlfile:\n :return:\n \"\"\"\n db = config()\n con = psycopg2.connect(**db)\n cursor = con.cursor()\n # Open the given sql file\n with open(sqlfile, 'r') as file:\n content_list = [line.rstrip(';') for line in file]\n\n tempLst = []\n for i in content_list:\n if i[:2] != '--' and i != '\\n':\n tempLst.append(i[:-1])\n content_list = tempLst\n\n separator = ' '\n content = separator.join(content_list)\n content_list = content.split(';')\n\n tempLst = []\n for i in content_list:\n tempLst.append(i + ';')\n tempLst.remove(tempLst[-1])\n content_list = tempLst\n\n for i in 
content_list:\n cursor.execute(i)\n con.commit()\n con.close()\n", "sub_path": "database_interactions/_functions/setup_database.py", "file_name": "setup_database.py", "file_ext": "py", "file_size_in_byte": 3239, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "configparser.ConfigParser", "line_number": 12, "usage_type": "call"}, {"api_name": "_functions.config.config", "line_number": 26, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 27, "usage_type": "call"}, {"api_name": "psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT", "line_number": 29, "usage_type": "argument"}, {"api_name": "psycopg2.DatabaseError", "line_number": 36, "usage_type": "attribute"}, {"api_name": "configparser.ConfigParser", "line_number": 48, "usage_type": "call"}, {"api_name": "_functions.config.config", "line_number": 61, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 62, "usage_type": "call"}, {"api_name": "psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT", "line_number": 63, "usage_type": "argument"}, {"api_name": "psycopg2.DatabaseError", "line_number": 71, "usage_type": "attribute"}, {"api_name": "_functions.config.config", "line_number": 88, "usage_type": "call"}, {"api_name": "psycopg2.connect", "line_number": 89, "usage_type": "call"}]} +{"seq_id": "292364581", "text": "import argparse\nimport dqn_agent\nimport gym\n\nparser = argparse.ArgumentParser(description=\"Starts an DQN Agent to learn a given atari environment with pixels as input. Standard environment is Pong-v0\")\nparser.add_argument(\"--environment\", \"-env\", default=\"Pong-v0\", dest=\"env\", type=str, help=\"chose an atari environment with pixel input\")\nparser.add_argument(\"--render\", \"-r\", action=\"store_true\", dest=\"render\", help=\"Set this flag to let the training session be rendered\")\nparser.add_argument(\"--steps\", \"-st\", default=5000000, dest=\"steps\", type=int, help=\"the duration in which the agent learns\")\nparser.add_argument(\"--nothing\", \"-n\", default=30, dest=\"nothing\", type=int, help=\"the maximum steps an aget may do nothing at the begining of each round\")\nparser.add_argument(\"--memory\", \"-m\", default=300000, dest=\"memory\", type=int, help=\"the size of the memory of the agent\")\nparser.add_argument(\"--pretrain\", \"-pre\", default=50000, dest=\"pretrain\", type=int, help=\"the amount of memories the agent gathers before starting to train\")\nparser.add_argument(\"--gamma\", \"-g\", default=0.99, dest=\"gamma\", type=float, help=\"the discount factor of the bellman equation\")\nparser.add_argument(\"--update\" \"-u\", default=50000, dest=\"update\", type=int, help=\"the number of steps until the target network is updated\")\nparser.add_argument(\"--start\", \"-s\", default=1.0, dest=\"start\", type=float, help=\"exploration start. 
1 = 100%% random actions\")\nparser.add_argument(\"--end\", \"-e\", default=0.1, dest=\"end\", type=float, help=\"the exploration rate doesn't go below this value\")\nparser.add_argument(\"--exploresteps\", \"-exs\", default=1000000, dest=\"explore\", type=int, help=\"the amount of steps the agent has to take until the end of the exploration rate is reached\")\nparser.add_argument(\"--play\", \"-p\", action=\"store_true\", dest=\"play\", help=\"set this flag if you just want to let the agent play\")\nparser.add_argument(\"--framestacksize\", \"-fss\", default=4, dest=\"framestacksize\", type=int, help=\"the size of the frames stacked together to give the agent a feeling for movement\")\nparser.add_argument(\"--batch\", \"-b\", default=32, type=int, dest=\"batch\", help=\"The size of the trainingsbatch\")\nparser.add_argument(\"--continue\", \"-c\", default=\"NONE\", type=str, dest=\"cont\", help=\"Continue a training session with given weights. Needs the path to the weights as argument.\")\n\nargs = parser.parse_args()\nenv = gym.make(args.env)\n\nif(args.play):\n print(\"Just letting the Agent play\")\nelse:\n print( \"Starting a new training session with the following configurations:\\n\\\n environment: {0}\\n\\\n continue: {12}\\n\\\n memory size: {1}\\n\\\n duration in steps: {2}\\n\\\n pre training memory filling: {3}\\n\\\n frame stack size: {4}\\n\\\n explore start: {5}\\n\\\n explore end: {6}\\n\\\n explore steps: {7}\\n\\\n gamma: {8}\\n\\\n batch size: {9}\\n\\\n training: {10}\\n\\\n rendering: {11}\\n\\n\\\n initializing neural network...\\n\".format(\n args.env, args.memory, args.steps,\n args.pretrain, args.framestacksize,\n args.start, args.end, args.explore,\n args.gamma, args.batch, not args.play, args.render,\n args.cont))\natari_agent = dqn_agent.DqnAgent(input_shape=(args.framestacksize, 84, 84), env=env, MAX_STEPS=args.steps,\n MAX_DOING_NOTHING=args.nothing, FRAME_SIZE=args.framestacksize,\n BATCH_SIZE=args.batch, MEMORY_SIZE=args.memory, PRETRAIN_LENGTH=args.pretrain,\n NETWORK_UPDATE=args.update, EXPLORE_START=args.start, EXPLORE_END=args.end,\n EXPLORE_STEPS=args.explore, GAMMA=args.gamma, is_training=not args.play, is_rendering=args.render,\n path_to_weights=args.cont)\n\nif not args.play:\n atari_agent.train()\n\natari_agent.play()\n", "sub_path": "dqn_atari.py", "file_name": "dqn_atari.py", "file_ext": "py", "file_size_in_byte": 3977, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 5, "usage_type": "call"}, {"api_name": "gym.make", "line_number": 23, "usage_type": "call"}, {"api_name": "dqn_agent.DqnAgent", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "395202463", "text": "#This is the main module for my weather app\n#I'm submitting an API request to Open Weather Map's API (https://openweathermap.org/api)\n\n\n#import daily_forecast_functions as dff\n#import extended_forecast_functions as eff\nimport sys\nimport csv\nimport requests\nimport json\nimport logbook\nimport time\n\n\ndef main():\n \"\"\"Weather app main module\n daily_forecast_functions and extended_forecast_functions contain individual descriptions of what each function does\"\"\"\n \n print(\"~~~~~~Erich's Weather~~~~~~~~\")\n print(\"We will check the weather at various climbing destinations by state\")\n\n forecast_time = input(\"Do you want today's forecast or 5 day forecast? 
(Enter [1] or [5]) : \")\n if forecast_time == '1':\n user_state_choice = state_choice(input(\"Enter state abbreviation (in caps) : \"))\n \n city_id_list = user_state_choice[0] #list of city ids returned from state_choice\n climbing_area_alias = user_state_choice[1] #list of climbing area aliases returned from state_choice\n \n json_data = single_dynamic_api_request(city_id_list)\n\n display_conditions_today(json_data, climbing_area_alias)\n\n\n \ndef state_choice(state):\n \"\"\"Asks for user_state_choice and imports csv('ClimbingAreasCityID.csv') of climbing areas by state. Looks for state in list, then returns list \n of city_ids and their climbing_area_alias for each location\"\"\"\n \n city_id_list = []\n climbing_area_alias = []\n \n with open('ClimbingAreasInfo.csv') as file:\n reader = csv.reader(file)\n my_list = list(reader)\n \n for i in my_list:\n if i[0] == state:\n climbing_area_alias.append(i[2])\n city_id_list.append(i[3])\n return city_id_list, climbing_area_alias \n \n\ndef single_dynamic_api_request(city_id_list):\n \"\"\"city_id_list is returned from state_choice and uses them to create api request to return weather for city.\"\"\"\n \n city_id_string = ','.join([str(city) for city in city_id_list]) #String comprehension to make one string from list of strings in city_id_list ex:\"763942,539671,334596\". API request can take up to 20 cities at at time\n \n request = requests.get(f'http://api.openweathermap.org/data/2.5/group?APPID=333de4e909a5ffe9bfa46f0f89cad105&id={city_id_string}&units=imperial')\n\n json_data = json.loads(request.text)\n \n# pprint(json_data) #including this in case you want to see pprint json data for each city in list\n return json_data \n\n\ndef display_conditions_today(json_data, climbing_area_alias):\n \"\"\"gets json data from create_dynamic_api_request (after API request is made)\n and climbing_area_alias is list of climbing areas near towns (not necessarily town names themselves).\n Then, this function parses the JSON data returned for each location and prints it in a human readable format.\n Lastly, creates a conditions score for each location in the conditions dict (temp x humidity for each location)\n and prints the lowest conditions score for today. However, this logic is flawed because at some point it gets too cold or too hot and \n conditions worsen again. \n \n In the future I need to build in stops for temps that are too cold or too hot or too humid.\"\"\"\n \n api_log = logbook.Logger(\"API Level (display_conditions_today())\")\n t_begin = time.time()\n \n \n print(\"Today's climbing weather forecast... \\n\")\n \n climbing_area_alias_count = 0\n #I create this count because climbing_alias_for_city is a list of climbing areas and I need to step through them each time I print info for a city\n #EX: Birmingham is the city but Moss Rock Preserve is the climbing area. Each time I run the 'for city in data['list']' loop, I need the corresponding climbing area name for each time I loop through\n\n try: #try/except block here to handle KeyError\n for city in json_data['list']:\n if city['sys']['country'] != 'US':\n print(f\"City: {city['name']}, {city['sys']['country']}\")\n print(f\"Climbing area: {climbing_area_alias[climbing_area_alias_count]}\")\n print(f\"Temp today: {city['main']['temp']}\")\n print(f\"Humidity: {city['main']['humidity']}\")\n print(f\"Weather: {city['weather'][0]['description']}\\n\") #need ['weather'][0]['description']. 
The description is item 0 in a list inside 'weather'\n                climbing_area_alias_count += 1\n            else:\n                print(f\"City: {city['name']}\")\n                print(f\"Climbing area: {climbing_area_alias[climbing_area_alias_count]}\")\n                print(f\"Temp today: {city['main']['temp']}\")\n                print(f\"Humidity: {city['main']['humidity']}\")\n                print(f\"Weather: {city['weather'][0]['description']}\\n\") #need ['weather'][0]['description']. The description is item 0 in a list inside 'weather'\n                climbing_area_alias_count += 1\n        \n        conditions_dict = {} #Making a dict to store 'Name': 'Conditions_score' as key,value pair\n        \n        for city in json_data['list']:\n            conditions_dict[city['name']] = (city['main']['temp']*city['main']['humidity'])\n        print(f\"Currently, the best conditions are in {min(conditions_dict, key=conditions_dict.get)}.\") 
\n        \n        api_log.trace(\"Search successful\")\n\n        t_end = time.time()\n        api_log.trace(f\"Finished search. Results in {(t_end-t_begin)} seconds\")\n        \n    except KeyError:\n        msg1 = (\"**Key Error** Either the state you entered is not in the database... \")\n        msg2 = (\"Or there is a problem in the with/open block in the state_choice() function\")\n\n        api_log.warn(msg1 + msg2)\n        \n\n\ndef init_logging(filename: str = None):\n    level = logbook.TRACE\n#    level = logbook.WARNING\n    \n    if filename: #if a filename is provided as an argument 
\n        logbook.TimedRotatingFileHandler(filename, level=level).push_application() #TimedRotatingFileHandler uses date as part of logging. Pushes this output to the log file\n    else: #if a filename is not provided\n        logbook.StreamHandler(sys.stdout, level=level).push_application() #Pushes output to stdout (in console) if no log file is specified\n\n    msg = f\"Logging Initialized. level: {level}, mode: {'stdout mode' if not filename else 'file mode: ' + filename}\" #gives mode. 
Either stdout (no filename) or filemode (if filename specified)\n\n logger = logbook.Logger('Startup')\n logger.notice(msg)\n\nif __name__ == '__main__':\n# init_logging('ClimbingWeatherAppLogbook')\n init_logging()\n main()\n", "sub_path": "Days31-33Logging/ClimbingWeatherApp-withLogging.py", "file_name": "ClimbingWeatherApp-withLogging.py", "file_ext": "py", "file_size_in_byte": 6621, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "csv.reader", "line_number": 43, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 58, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 60, "usage_type": "call"}, {"api_name": "logbook.Logger", "line_number": 76, "usage_type": "call"}, {"api_name": "time.time", "line_number": 77, "usage_type": "call"}, {"api_name": "time.time", "line_number": 111, "usage_type": "call"}, {"api_name": "logbook.TRACE", "line_number": 123, "usage_type": "attribute"}, {"api_name": "logbook.TimedRotatingFileHandler", "line_number": 127, "usage_type": "call"}, {"api_name": "logbook.StreamHandler", "line_number": 129, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 129, "usage_type": "attribute"}, {"api_name": "logbook.Logger", "line_number": 133, "usage_type": "call"}]} +{"seq_id": "306078298", "text": "import os, sys, requests, websocket, json, bisect\nimport numpy as np, pandas as pd\nimport pyqtgraph as pg\nfrom pyqtgraph import QtCore, QtGui\nQt = QtCore.Qt\nfrom utils import *\nimport exchanges\n\nOPEN = 0\nHIGH = 1\nLOW = 2\nCLOSE = 3\nVOLUME = 4\n\nCOLOR_TRANSPARENT = (0, 0, 0, 0)\nCOLOR_BLACK = (0, 0, 0)\nCOLOR_GREY = (128, 128, 128)\nCOLOR_RED = (255, 0, 0)\nCOLOR_GREEN = (0, 255, 0)\nCOLOR_BLUE = (0, 0, 255)\nCOLOR_YELLOW = (255, 255, 0)\nCOLOR_ORANGE = (255, 128, 0)\nCOLOR_LIME = (128, 255, 0)\nCOLOR_CYAN = (0, 255, 255)\nCOLOR_MAGENTA = (255, 0, 255)\nCOLOR_WHITE = (255, 255, 255)\nCOLOR_MAHOGANY = (192, 64, 0)\nCOLOR_BLUE_MUNSELL = (0, 147, 175)\n\nRGC_COLORS = [COLOR_RED, COLOR_GREEN, COLOR_CYAN]\nLINE_COLORS = [COLOR_WHITE] + RGC_COLORS + [COLOR_YELLOW, COLOR_MAGENTA, COLOR_ORANGE, COLOR_LIME]\n\ndef penD(color, width=1, **kwds): return dict(color=color, width=width, **kwds)\nCOLOR_BB = COLOR_BLUE_MUNSELL\nCOLOR_KC = COLOR_MAHOGANY\nTA_LIST = dict(\n OHLC = Struct(menu='OHLC'),\n Close = Struct(menu='Close'),\n Orderwall = Struct(menu='&Orderwall'),\n\n Volume = Struct(menu='&Volume', ma=10, maPen=penD(COLOR_CYAN)),\n VolumeDiv = Struct(menu='Volume/Max',\n window=10,\n yTicks=[1., 2.]),\n VolumeSpike = Struct(menu='Volume &Spike Signal'),\n VPT = Struct(menu='Volume-&Price Trend', ma=10, maPen=penD(COLOR_CYAN)),\n Momentum = Struct(menu='&Momentum',\n momentumN=10,\n yTicks=[1.,0.,-1.]),\n\n TrendBars = Struct(menu='Tr&end Bars'),\n TrendChange = Struct(menu='Trend Change'),\n Candlestick = Struct(menu='&Candlestick'),\n PercentageYAxis = Struct(menu='% &Y-Axis'),\n ClampXAxis = Struct(menu='Clamp To &X-Axis'),\n ClampIndicators = Struct(menu='Clamp To Indicators'),\n AlwaysLegend = Struct(menu='Always Show Le&gend'),\n CopyVertically = Struct(menu='Copy Charts Vertically'),\n\n Reversion = Struct(menu='Reversion Signal'),\n Bounce = Struct(menu='Bounce Signal'),\n MA1 = Struct(menu='MA &1', lines=[\n Struct(window=8, pen=penD(COLOR_YELLOW, 1, dash=[7, 3])),\n Struct(window=21, pen=penD(COLOR_ORANGE, 1, dash=[21, 3])),\n ]),\n MA2 = Struct(menu='MA &2', lines=[\n Struct(window=50, pen=penD(COLOR_YELLOW, 2)),\n Struct(window=100, 
pen=penD(COLOR_ORANGE, 2)),\n ]),\n VWMA1 = Struct(menu='VWMA &1'),\n VWMA2 = Struct(menu='VWMA &2'),\n\n BB = Struct(menu='&Bollinger Bands',\n window=20, stdDevMult=2.5, pen=COLOR_BB),\n BBKCRatio = Struct(menu='BB&/KC Ratio',\n yTicks=[0.5, 1.]),\n Trend = Struct(menu='AK Tren&d'),\n ATR = Struct(menu='Average &True Range',\n window=20, pen=penD(COLOR_RED, 2)),\n KC = Struct(menu='&Keltner Channel',\n atrMult=2., pen=COLOR_KC),\n ADX = Struct(menu='&ADX',\n yTicks=[20,# trend weakness\n 40,# trend strength\n 50 # extremely strong trend\n ],\n pen=penD(COLOR_CYAN, 2),\n penPlusDI=penD(COLOR_GREEN),\n penMinusDI=penD(COLOR_RED)),\n RSI = Struct(menu='&Relative Strength Index',\n window=14,\n yTicks=[30,# oversold\n 70 # overbought\n ],\n pen=penD(COLOR_WHITE, 2)),\n Pulse = Struct(menu='P&ulse',\n momentumMA=5),\n)\n\nclass TAPlot():\n def __init__(self, ta, **kwds):\n if 'name' not in kwds:\n self.name = ta.menu.replace('&', '') if ta else None\n self.extraTA = True\n self.barGraph = False\n self.centerY = False\n self.showValue = True\n self.yColumns = []\n self.yTicks = []\n self.width = 2./3. # Multiplied by timeInterval\n if ta is not None:\n self.__dict__.update(ta.__dict__)\n self.__dict__.update(kwds)\n self.yColumns = wrapList(self.yColumns)\n\ndef floatToIndex(floatIdx):\n return int(round(floatIdx))\n\ndef parseToMarketStruct(string, exchangeAlreadyKnown=None):\n string = string.strip()\n inverse = string.startswith('1/')\n if inverse:\n string = string[2:]\n\n # Cut off any preceeding description, eg: Advanced Micro Devices, Inc. | NASDAQ:AMD\n sp = [s.strip() for s in string.split('|')]\n\n description = sp[0] if len(sp) > 1 else ''\n string = sp[-1]\n\n sp = [s.strip() for s in string.split('/')]\n if exchangeAlreadyKnown:\n exchange = exchangeAlreadyKnown\n else:\n exchange = exchanges.findExchange(sp.pop())\n\n # Symbol itself might have slashes, eg: BTS/BTC / Poloniex\n symbolKey = '/'.join(sp)\n\n # In case of incomplete symbol name\n symbolKey = exchange.findSymbol(symbolKey)\n\n # Cut off the description again, as findSymbol() may have added it back.\n symbolKey = (symbolKey.split('|')[-1]).strip()\n\n return Struct(exchange=exchange,\n symbolKey=symbolKey,\n description=description,\n inverse=inverse,\n timeframe='')\n\nclass DrawPictureItem(pg.GraphicsObject):\n def __init__(self, picture):\n pg.GraphicsObject.__init__(self)\n self.picture = picture\n\n def paint(self, p, *args):\n p.drawPicture(0, 0, self.picture)\n\n def boundingRect(self):\n rect = QtCore.QRectF(self.picture.boundingRect())\n return rect\n\nclass ChartData():\n # Static because this is set a few levels above where downloadAndParse is called.\n cacheSeconds = 0\n\n def __init__(self, market, existingDf=None, existingCD=None):\n if existingDf is None:\n self.df = None\n self.orderbook = []\n else:\n self.df = existingDf\n self.orderbook = existingCD.orderbook\n\n self.timeInterval = 1.\n\n if not market:\n return\n self.__dict__.update(market.__dict__)\n\n if existingDf is not None and self.count():\n self.onDataChange()# We already have a filled ChartData object\n #return # Dont override the interval key - WHY?\n\n if len(self.timeframe) == 1:\n # marketTuple can skip multiple on interval, eg 'h'\n lowest = sys.maxsize\n for timeframe in self.exchange.intervals:\n if timeframe[-1] != self.timeframe:\n continue\n multiple = int(timeframe[:-1])\n lowest = min(lowest, multiple)\n if lowest != sys.maxsize:\n self.timeframe = str(lowest) + self.timeframe\n\n def isOHLC(self):\n return True if self.df is 
None else all([col in self.df.columns for col in ['open', 'high', 'low', 'close']])\n def hasVolume(self): return 'volume' in self.df.columns and self.df.volume.max() > 0.\n\n def setOHLC(self, tohlcv, columns=None):\n if not tohlcv:\n return\n\n if not columns:\n if len(tohlcv[0]) == 2:\n columns = ['times', 'close']\n else:\n columns = ['times', 'open', 'high', 'low', 'close', 'volume']\n df = self.df = pd.DataFrame(tohlcv, columns=columns)\n\n ohlcCols = ['open', 'high', 'low', 'close']\n for col in ohlcCols:\n if col not in df.columns: continue\n\n if self.inverse:\n df[col] = 1. / df[col]\n\n # Some entries on Google are all 0 except for the close\n df.loc[df[col]==0., col] = df.close\n\n if 'low' in df.columns:\n df.low = df.low.replace([np.inf], 0.) # BTS\n # Yahoo often has currencies upside down\n ohlcCols = df[ohlcCols]\n df.high = ohlcCols.max(axis=1)\n df.low = ohlcCols.min(axis=1)\n\n self.onDataChange()\n\n def market(self):\n return Struct(exchange=self.exchange,\n symbolKey=self.symbolKey,\n description=self.description,\n inverse=self.inverse,\n timeframe=self.timeframe)\n def resampleNew(self, timeframe):# 'D' / 'H'\n df = self.df\n # Convert the integer timestamps in the index to a DatetimeIndex\n # This interprets the integers as seconds since the Epoch.\n df.index = pd.to_datetime(df.times, unit='s')\n\n r = df.resample(rule=timeframe.upper(), closed='right', label='right')\n if self.isOHLC():\n newDf = r.apply(dict(open='first', high='max', low='min', close='last'))\n else:\n newDf = r.apply(dict(close='ohlc'))\n newDf = newDf.close # Remove MultiIndex\n\n newDf['times'] = r.apply(dict(times='first')).times\n if 'volume' in df.columns:\n newDf['volume'] = r.apply(dict(volume='sum')).volume\n\n df.index = range(df.shape[0]) # Restore integer indices\n newDf = newDf[~newDf.times.isnull()]# Remove rows with NANs\n newDf.index = range(newDf.shape[0])\n\n market = self.market()\n market.timeframe = timeframe\n ret = ChartData(market, newDf, self)\n return ret\n\n def appendMinuteData(self, minuteDataToCopy):\n df = self.df\n dtCompare = {'d': lambda t: t.date(),\n 'h': lambda t: (t.date(), t.hour)}[self.timeframe[1]]\n\n dtNow = now()\n #resampled = self.resampleNew(self.timeframe[1].upper())#FIXME\n ohlcv = minuteDataToCopy.calcOHLCForPeriod(dtNow, dtCompare)\n if ohlcv[LOW] == sys.float_info.max:\n return\n\n tohlcv = [timestamp(dtNow)] + ohlcv\n if self.count() and dtCompare(fromtimestamp(df.times.iloc[-1])) == dtCompare(dtNow):\n # Replace last OHLC entry\n df.iloc[-1] = tohlcv\n else:\n # Append OHLC entry\n df.loc[self.count()] = tohlcv\n\n self.onDataChange()\n\n def onDataChange(self):\n df = self.df\n if self.exchange.filterGaps:\n avgInterval = (df.times.iloc[-1] - df.times[0]) / self.count()\n self.timeInterval = avgInterval\n # Yahoo has weekend gaps etc\n self.plotTimes = pd.Series([df.times[0] + avgInterval*i for i in range(self.count())])\n else:\n # Calculate modal average interval, in case data has gaps\n mode = (df.times - df.times.shift(1)).mode()\n self.timeInterval = float(mode[0]) if len(mode) else 0.\n self.plotTimes = pd.Series(df.times)\n self.plotTimes -= self.timeInterval / 2\n\n self.cachedPlotItems = {}\n self.calculatedIndicators = False\n\n def count(self):\n return 0 if self.df is None else len(self.df.times)\n\n def clampIndex(self, floatIdx, inclusive=True):\n idx = floatToIndex(floatIdx)\n idx = max(min(idx, self.count()), 0)\n if inclusive and idx == self.count():\n idx -= 1\n return idx\n\n def clampIndexTime(self, idx):\n return 
self.df.times[self.clampIndex(idx)]\n\n def findTimeIndex(self, time):\n return self.df.times.searchsorted(time)[0]\n\n def unfilterIndex(self, time):\n ret = int((time - self.df.times[0]) / self.timeInterval)\n return ret\n\n def calcOHLCForPeriod(self, dtForDay, dtCompare):\n df = self.df\n ohlcv = [0.]*(VOLUME+1)\n ohlcv[HIGH] = -sys.float_info.max\n ohlcv[LOW] = sys.float_info.max\n\n for e in range(self.count()):\n time = fromtimestamp(df.times[e])\n if dtCompare(time) != dtCompare(dtForDay):\n continue\n\n if ohlcv[OPEN] == 0.:\n ohlcv[OPEN] = df.open[e]\n ohlcv[CLOSE] = df.close[e]\n ohlcv[HIGH] = max(ohlcv[HIGH], df.high[e])\n ohlcv[LOW] = min(ohlcv[LOW], df.low[e])\n ohlcv[VOLUME] += df.volume[e]\n\n return ohlcv\n\n def calcPulse(self, momentumMA, **unused):\n df = self.df\n idxSignal = []\n idxMomentum = []\n\n self.squeezeDuration = 0\n self.squeezeLast = 0\n self.squeezeState = 'Ended'\n\n momentumMA = df.momentum.rolling(momentumMA, min_periods=0).mean() if momentumMA else df.momentum\n\n slowdownPrediction = abs(df.bbkcRatio.shift(1)) > abs(df.bbkcRatio)\n for e in range(self.count()):\n # Dots for BB squeeze\n if df.bbSqueeze[e]:\n self.squeezeState = 'Squeeze'\n\n if self.squeezeLast:\n self.squeezeLast = 0\n self.squeezeDuration = 0\n\n self.squeezeDuration += 1\n else:\n if self.squeezeState == 'Squeeze':\n self.squeezeState = 'Fired'\n\n self.squeezeLast += 1\n\n # Momentum histogram with faded colors when we are predicting a slowdown.\n idx = 1 if momentumMA[e] >= 0. else 0\n if slowdownPrediction[e]:\n idx += 2\n if self.squeezeState == 'Fired':\n self.squeezeState = 'Ended'\n idxMomentum.append(idx)\n\n self.squeezeState += ' (%i)' % (self.squeezeDuration if self.squeezeState == 'Squeeze' else self.squeezeLast)\n\n return momentumMA, idxSignal, idxMomentum\n\n def calcRSI(self, window, **unused):\n df = self.df\n closeDiff = df.close - df.close.shift(1)\n upPeriods = (closeDiff).where(closeDiff > 0., 0.)\n downPeriods = (-closeDiff).where(closeDiff < 0., 0.)\n\n rs = upPeriods.ewm(window).mean() / downPeriods.ewm(window).mean()\n rsi = 100. - (100. / (1. + rs))\n df['rsi'] = rsi\n\n def calcADX(self, window, **unused):\n df = self.df\n upMove = df.high - df.high.shift(1)\n downMove = df.low.shift(1) - df.low\n\n plusDM = upMove.where((upMove > downMove) & (upMove > 0.), 0.)\n minusDM = downMove.where((downMove > upMove) & (downMove > 0.), 0.)\n\n kwds = dict(window=window, min_periods=0)\n plusDI = 100. * plusDM.rolling(**kwds).mean() / df.atr\n minusDI = 100. * minusDM.rolling(**kwds).mean() / df.atr\n\n finalCalc = abs((plusDI - minusDI) / (plusDI + minusDI))\n finalCalc[0] = 0.\n adx = 100. 
* finalCalc.rolling(**kwds).mean()\n\n df['adx'] = adx\n df['adxPlusDI'] = plusDI\n df['adxMinusDI'] = minusDI\n\n def calcSignals(self):\n df = self.df\n # Reversion to mean, based on low trend ADX, RSI oversold/bought, outside KC.\n adxTicks = self.taADX[0].yTicks\n rsiTicks = self.taRSI.yTicks\n lowTrend = adxTicks[0]\n medTrend = (adxTicks[0] + adxTicks[1]) / 2.\n overbought = rsiTicks[1]\n oversold = rsiTicks[0]\n df['reversionShort'] = (df.adx < lowTrend) & (df.rsi > overbought) & (df.close > df.kcUpper) #& (df.adxPlusDI < medTrend)\n df['reversionLong'] = (df.adx < lowTrend) & (df.rsi < oversold) & (df.close < df.kcLower) #& (df.adxMinusDI < medTrend)\n\n def calcIndicators(self):\n if not self.count():\n return\n if self.calculatedIndicators:\n return\n self.calculatedIndicators = True\n df = self.df\n\n if 'close' in df.columns or self.isOHLC():\n lineColumns = ['close']\n else:\n lineColumns = df.columns[1:]\n self.taClose = [\n TAPlot(None,\n name=col,\n extraTA=False,\n pen=penD(LINE_COLORS[c % len(LINE_COLORS)],\n dash=[None, [7, 3]][int(c / len(LINE_COLORS))]),\n yColumns=[df[col]])\n for c,col in enumerate(lineColumns)\n ]\n\n if 'close' not in df.columns:\n return\n\n dotCommon = dict(\n pen=None,# disable line drawing between points\n symbol='o',\n symbolSize=8,\n symbolPen=None,\n )\n def SignalTAPlot(series, highLow=None, ta=None, symbolBrush=COLOR_CYAN, **kwds):\n index = df.loc[series].index\n last = int(index[-1]) if len(index) else 0\n lastSignal = self.count() - 1 - last\n\n if highLow is None:\n highLow = df.low if 'low' in df.columns else df.close\n\n return TAPlot(ta,\n lastSignal=lastSignal,\n extraTA=False,\n x=self.plotTimes[series],\n yColumns=highLow[series],\n symbolBrush=symbolBrush,\n **kwds,\n **dotCommon)\n\n kwds = dict(min_periods=0)\n if 'volume' in df.columns:\n ta = TA_LIST['Volume']\n df['upBar'] = df.close >= df.open if 'open' in df.columns else True\n ma = df.volume.rolling(window=ta.ma, **kwds).mean()\n self.taVolume = [\n TAPlot(ta,\n barGraph=True,\n penIdx=df.upBar,\n #penIdx=df.volume > ma,\n yColumns=df.volume),\n TAPlot(ta,\n name='MA %i' % ta.ma,\n pen=ta.maPen,\n yColumns=ma)\n ]\n\n ta = TA_LIST['VolumeDiv']\n self.taVolumeDiv = TAPlot(ta,\n name='Volume/Max(%i)' % ta.window,\n barGraph=True,\n penIdx=df.upBar,\n yColumns=(df.volume/df.volume.shift(1).rolling(ta.window, **kwds).max()).replace([np.inf, np.nan], 0.))\n\n self.taVolumeSpike = SignalTAPlot(\n self.taVolumeDiv.yColumns[0] > self.taVolumeDiv.yTicks[0],\n ta=TA_LIST['VolumeSpike'],\n symbolBrush=COLOR_YELLOW)\n\n ta = TA_LIST['VPT']\n vpt = [0.]*self.count()\n for i in range(1, self.count()):\n vpt[i] = vpt[i-1] + df.volume[i] * (df.close[i]-df.close[i-1])/(df.close[i-1] or 1.)\n df['vpt'] = vpt\n ma = df.vpt.rolling(window=ta.ma, **kwds).mean()\n self.taVPT = [\n TAPlot(ta,\n name='VPT',\n barGraph=True,\n penIdx=df.vpt > ma,\n yColumns=df.vpt),\n TAPlot(ta,\n name='MA %i' % ta.ma,\n pen=ta.maPen,\n yColumns=ma)\n ]\n\n for maSet in range(1, 3):\n l, vwl = [], []\n key = 'MA%i' % maSet\n setattr(self, 'ta' + key, l)\n setattr(self, 'taVW' + key, vwl)\n for ta in TA_LIST[key].lines:\n l.append(TAPlot(ta,\n extraTA=False,\n name='MA %i' % ta.window,\n yColumns=df.close.rolling(ta.window, **kwds).mean()))\n\n if not self.hasVolume():\n continue# Cant do VWMA without volume\n\n ta = ta.copy()\n ta.pen['width'] *= 2\n vwl.append(TAPlot(ta,\n extraTA=False,\n name='VWMA %i' % ta.window,\n yColumns=(df.close * df.volume).rolling(ta.window, **kwds).mean()/\n 
(df.volume).rolling(ta.window, **kwds).mean()))\n\n ta = TA_LIST['BB']\n std = df.close.rolling(ta.window, **kwds).std()\n std[0] = 0. # Dont want Nan in first entry\n df['bbMean'] = df.close.rolling(ta.window, **kwds).mean()\n band = ta.stdDevMult*std\n df['bbUpper'] = df.bbMean + band\n df['bbLower'] = df.bbMean - band\n df['bbGap'] = 2*band\n self.taBB = TAPlot(ta,\n extraTA=False,\n name='BB %i, %g' % (ta.window, ta.stdDevMult),\n yColumns=[\n df.bbUpper,\n #df.bbMean,\n df.bbLower\n ])\n\n ta = TA_LIST['Trend']\n a = df.close.ewm(span=10).mean()\n b = df.close.ewm(span=50).mean()\n df['akTrend'] = (a - b)# / b\n self.taTrend = TAPlot(ta,\n yColumns=df.akTrend,\n barGraph=True,\n centerY=True,\n pens=[COLOR_RED, COLOR_GREEN],\n brushes=[COLOR_BLACK, COLOR_BLACK])\n df['upTrend'] = df.akTrend > 0.\n\n self.taTrendChange = SignalTAPlot(\n (df.akTrend.shift(1) < 0.) & (df.akTrend > 0.),\n ta=TA_LIST['TrendChange'],\n symbolBrush=COLOR_YELLOW)\n\n if 'high' not in df.columns:\n return\n\n self.taOHLC = TAPlot(None,\n name='OHLC',\n extraTA=False,\n yTicks=[0.],\n yColumns=[df.high, df.low])\n\n df['typicalPrice'] = (df.high + df.low + df.close) / 3.\n if 0:\n trendPrice = df.typicalPrice.rolling(6).mean()\n df['upTrend'] = df.close >= trendPrice\n\n ta = TA_LIST['ATR']\n atrWindow = ta.window\n avgTrueRange = pd.DataFrame()\n avgTrueRange[0] = df.high - df.low\n avgTrueRange[1] = abs(df.high - df.close.shift(1))\n avgTrueRange[2] = abs(df.low - df.close.shift(1))\n avgTrueRange = avgTrueRange.max(axis=1)\n df['atr'] = avgTrueRange.rolling(atrWindow, **kwds).mean()\n self.taATR = TAPlot(ta,\n name='ATR %i' % atrWindow,\n yColumns=df.atr)\n\n ta = TA_LIST['KC']\n df['kcMean'] = df.typicalPrice.rolling(atrWindow, **kwds).mean()\n df['kcUpper'] = df.kcMean+ta.atrMult*df.atr\n df['kcLower'] = df.kcMean-ta.atrMult*df.atr\n self.taKC = TAPlot(ta,\n extraTA=False,\n name='KC %i, %g' % (atrWindow, ta.atrMult),\n yColumns=[\n df.kcUpper,\n #df.kcMean,\n df.kcLower\n ])\n\n kcGap = df.kcUpper - df.kcLower\n df['bbkcRatio'] = df.bbGap / kcGap\n df['bbSqueeze'] = df.bbkcRatio < 1.\n\n ta = TA_LIST['ADX']\n self.calcADX(atrWindow, **ta.__dict__)\n self.taADX = [\n TAPlot(ta,\n name='ADX %i' % atrWindow,\n yColumns=df.adx),\n TAPlot(ta,\n name='+DI %i' % atrWindow,\n pen=ta.penPlusDI,\n yColumns=df.adxPlusDI),\n TAPlot(ta,\n name='-DI %i' % atrWindow,\n pen=ta.penMinusDI,\n yColumns=df.adxMinusDI),\n ]\n\n ta = TA_LIST['RSI']\n self.calcRSI(**ta.__dict__)\n self.taRSI = TAPlot(ta,\n name='RSI %i' % ta.window,\n yColumns=df.rsi)\n\n ta = TA_LIST['Momentum']\n shift = df.close.shift(ta.momentumN)\n momentum = (df.close - shift) / shift\n momentum[momentum.isnull()] = 0.\n df['momentum'] = momentum\n self.taMomentum = TAPlot(ta,\n name='Momentum %i' % ta.momentumN,\n centerY=True,\n barGraph=True,\n yColumns=df.momentum)\n\n ta = TA_LIST['Pulse']\n momentumMA, idxSignal, idxMomentum = self.calcPulse(**ta.__dict__)\n\n def yZeros(xseries): return np.zeros((len(xseries),))\n def yOnes(xseries): return np.ones((len(xseries),))\n bbInX = self.plotTimes[df.bbSqueeze]\n squeezeDots = TAPlot(ta,\n name='BB Inside',\n x=bbInX,\n yColumns=yZeros(bbInX),\n symbolBrush=COLOR_YELLOW,\n **dotCommon)\n signalY = float(momentumMA.max()/20)\n pens = []\n squeezeBar = TAPlot(ta,\n barGraph=True,\n yColumns=signalY,\n y0=-signalY/2,\n width=1/4.,\n penIdx=df.bbSqueeze,\n pens=[COLOR_TRANSPARENT, COLOR_YELLOW])\n momentum = TAPlot(ta,\n name=self.taMomentum.name + ', %i' % ta.momentumMA,\n barGraph=True,\n centerY=True,\n 
showValue=False,\n width=1.,\n yColumns=momentumMA,\n penIdx=idxMomentum,\n pens=[COLOR_RED, COLOR_CYAN, COLOR_RED, COLOR_CYAN],\n brushes=[COLOR_RED, COLOR_CYAN, [c/3 for c in COLOR_RED], [c/3 for c in COLOR_CYAN]])\n self.taPulse = [momentum, squeezeDots]\n self.taPulseBars = [momentum, squeezeBar] # Cell charts can only use QPicture\n\n ta = TA_LIST['BBKCRatio']\n self.taBBKCRatio = TAPlot(ta,\n yColumns=df.bbkcRatio,\n width=1.,\n barGraph=True,\n pens=[COLOR_BB, COLOR_KC],\n penIdx=df.bbSqueeze)\n\n self.calcSignals()\n self.taReversion = [\n SignalTAPlot(\n df.reversionShort,\n df.high,\n name='Reversion Short',\n symbolBrush=COLOR_RED),\n SignalTAPlot(\n df.reversionLong,\n df.low,\n name='Reversion Long')\n ]\n self.taSqueeze = SignalTAPlot(df.bbSqueeze)\n\n class PicArgs:\n def __init__(self, *args):\n self.maxRows, self.showTrendBars, self.showCandlestick = args\n self.args = args\n self.calcY = True\n self.shiftY = 0.\n self.scaleY = 1.\n\n def createPlotItems(self, taPlots, addToPlot, args):\n ta0 = taPlots[0]\n cacheKey = (ta0.name, *args.args, args.calcY)\n cachedItems = self.cachedPlotItems.get(cacheKey)\n\n startRow = 0\n if args.maxRows:\n startRow = max(0, self.count() - args.maxRows)\n if not cachedItems and args.calcY:\n init = sys.float_info.max\n y_min = init\n y_max = -init\n for ta in taPlots:\n for yColumn in ta.yColumns:\n if type(yColumn) == float: continue\n yColumn = yColumn[startRow:]\n y_min = min(y_min, yColumn.min())\n y_max = max(y_max, yColumn.max())\n\n if ta0.centerY:\n args.scaleY = 1./max(y_max, -y_min)\n else:\n args.scaleY = 1./(y_max - y_min)\n args.shiftY = -y_min\n\n newItems = []\n for t, ta in enumerate(taPlots):\n def addItem(item, col):\n if not addToPlot: return\n addToPlot.addItem(item)\n legend = getattr(addToPlot, 'legend', None)\n if not legend: return\n if ta.name and ta.name != 'OHLC' and type(item) in [pg.BarGraphItem, DrawPictureItem]:\n # Not automatically done for BarGraphItem or DrawPictureItem\n legend.addItem(item, name=ta.name)\n if col:\n # Only allow each indicator to have one line in the legend.\n legend.removeItem(ta.name)\n\n if cachedItems:\n for col, item in enumerate(cachedItems[t]):\n addItem(item, col)\n continue\n\n items = []\n opts = ta.__dict__.copy()\n x = self.plotTimes[startRow:]\n for col, yColumn in enumerate(ta.yColumns):\n item = None\n if ta.name in ['OHLC']:\n item = DrawPictureItem(self.createOHLCPicture(startRow, args))\n item.opts = dict(pen=COLOR_TRANSPARENT)\n if item:\n items.append(item)\n break\n\n if type(yColumn) == float:\n floatY = yColumn\n floatY = 0.1 # Hack: constant size for signal dots\n opts.update(y0=-floatY)#\n else:\n floatY = yColumn[startRow:]\n\n floatY = (args.shiftY + floatY) * args.scaleY\n if type(yColumn) != float:\n floatY = floatY.tolist()\n\n if hasattr(ta, 'x'):\n item = pg.PlotDataItem(y=floatY, **opts)\n else:\n picture = QtGui.QPicture()\n p = QtGui.QPainter(picture)\n if args.maxRows != 0:\n yTicks = ta.yTicks\n if ta.centerY: yTicks = yTicks + [0.]\n p.setPen(pg.mkPen(COLOR_GREY))\n for y in yTicks:\n y = (args.shiftY + y) * args.scaleY\n p.drawLine(QtCore.QPointF(x.iloc[0], y), QtCore.QPointF(x.iloc[-1], y))\n if ta.barGraph:\n penIdx = getattr(ta, 'penIdx', yColumn > 0.)\n brushIdx = getattr(ta, 'brushIdx', penIdx)\n pens = getattr(ta, 'pens', RGC_COLORS)\n brushes = getattr(ta, 'brushes', None) or [[c/2 for c in pen] for pen in pens]\n opts.update(width=ta.width*self.timeInterval,\n x=x,\n pens=[pens[i] for i in penIdx[startRow:]],\n brushes=[brushes[i] for i in 
brushIdx[startRow:]])\n plot = pg.BarGraphItem(height=floatY, **opts)\n plot.drawPicture()\n p.drawPicture(0, 0, plot.picture)\n else:\n xy = (x.tolist(), floatY)\n plot = pg.PlotDataItem(*xy, **opts)\n plot.curve.paint(p, None, None)\n p.end()\n item = DrawPictureItem(picture)\n item.opts = plot.opts # legend\n items.append(item)\n for col, item in enumerate(items):\n addItem(item, col)\n newItems.append(items)\n\n if not cachedItems:\n self.cachedPlotItems[cacheKey] = newItems\n\n # data must have fields: time, open, close, min, max, volume\n def createOHLCPicture(self, startRow, args):\n df = self.df\n\n picture = QtGui.QPicture()\n p = QtGui.QPainter(picture)\n w = self.timeInterval / 3\n\n if args.maxRows != 0:\n scaledDf = (args.shiftY + pd.DataFrame(df, columns=['open', 'high', 'low', 'close'])) * args.scaleY\n else:\n scaledDf = df\n\n open_v, high_v, low_v, close_v = \\\n scaledDf.open.values, scaledDf.high.values, scaledDf.low.values, scaledDf.close.values\n upTrend_v = df.upTrend.values\n upBar_v = df.upBar.values\n\n pens = [pg.mkPen(color) for color in RGC_COLORS]\n brushes = [pg.mkBrush([c/2 for c in color]) for color in RGC_COLORS]\n for e in range(startRow, self.count()):\n t = self.plotTimes[e]\n open, high, low, close, = open_v[e], high_v[e], low_v[e], close_v[e]\n\n if args.showTrendBars:\n upTrend = upTrend_v[e]\n color = 1 if upTrend else 0\n else:\n color = upBar_v[e]\n p.setPen(pens[color])\n\n if low != high:\n p.drawLine(QtCore.QPointF(t, low), QtCore.QPointF(t, high))\n\n if args.showCandlestick:\n if 0:#args.showTrendBars and upTrend != upBar_v[e]:\n p.setBrush(Qt.black)# Make the body hollow\n else:\n p.setBrush(brushes[color])# Solid body\n\n y = open\n height = close - open\n p.drawRect(QtCore.QRectF(t-w, y, w*2, height))\n else:\n p.drawLine(QtCore.QPointF(t-w, open), QtCore.QPointF(t, open))\n p.drawLine(QtCore.QPointF(t+w, close), QtCore.QPointF(t, close))\n\n p.end()\n return picture\n\n def getTA(self, taName):\n ret = getattr(self, 'ta' + taName, None)\n if not ret: return None\n return wrapList(ret)\n\n def chartCacheKey(self, responseIdx=0):\n return ['charts', self.exchange.name, self.symbolKey, self.timeframe, str(responseIdx)]\n\n def downloadAndParse(self, getOrders=False):\n responses = []\n\n isRecent = False\n if ChartData.cacheSeconds:\n chartKey = self.chartCacheKey(0)\n cached = gCache.get(chartKey)\n isRecent = cached and (now() - cached.time).seconds < ChartData.cacheSeconds\n\n o = Struct(fromCache=False)\n def saveResponse(response):# If None, then we will try to load from the cache instead.\n chartKey = self.chartCacheKey(len(responses))\n\n if response:\n gCache.set(chartKey, response)\n else:\n o.fromCache = True\n cached = gCache.get(chartKey)\n if not cached:\n raise Exception('FAILED request, response not cached')\n response = cached.value\n\n try: response = json.loads(response)\n except: pass# Yahoo uses CSV not json\n\n responses.append(response)\n\n auth = None #requests.auth.HTTPBasicAuth('rpcuser', 'rpcpass')\n\n dataUrls = self.exchange.dataUrls(self, getOrders)\n for urlDict in dataUrls:\n url = urlDict['url']\n post = urlDict.get('post')\n if post:\n # If there are multiple things to post, use websocket\n # websocket doesnt pickup proxy settings from environment, like requests does.\n options = {}\n HTTP_PROXY = os.environ.get('HTTP_PROXY')\n if HTTP_PROXY:\n from urlparse import urlparse\n p = urlparse(HTTP_PROXY)\n options.update(http_proxy_host=p.netloc.split(':')[0], http_proxy_port=p.port)\n\n ws = None\n if not 
isRecent:\n print(url)\n try: ws = websocket.create_connection(url, **options)\n except: pass\n\n for p in post:\n p = p(responses) if hasattr(p, '__call__') else p\n print(p)\n response = None\n if ws:\n ws.send(json.dumps(p))\n response = ws.recv()\n saveResponse(response)\n\n if ws:\n ws.close()\n else:\n # Non-socket requests\n response = None\n if not isRecent:\n headers = urlDict.get('headers', {})\n data = urlDict.get('data', {})\n try:\n if data:\n print(data)\n # FIXME: move into requestStr\n response = requests.post(url, data=data, headers=headers, auth=auth)\n if response and response.ok:\n response = response.content.decode('utf-8')\n else:\n response = requestStr(url, headers=headers)\n except Exception as e:\n print('REQUEST FAILED:', e)\n\n saveResponse(response)\n\n self.exchange.parseData(self, responses)\n return o.fromCache\n", "sub_path": "chartdata.py", "file_name": "chartdata.py", "file_ext": "py", "file_size_in_byte": 34598, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pyqtgraph.QtCore.Qt", "line_number": 5, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore", "line_number": 5, "usage_type": "name"}, {"api_name": "exchanges.findExchange", "line_number": 135, "usage_type": "call"}, {"api_name": "pyqtgraph.GraphicsObject", "line_number": 152, "usage_type": "attribute"}, {"api_name": "pyqtgraph.GraphicsObject.__init__", "line_number": 154, "usage_type": "call"}, {"api_name": "pyqtgraph.GraphicsObject", "line_number": 154, "usage_type": "attribute"}, {"api_name": "pyqtgraph.QtCore.QRectF", "line_number": 161, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 161, "usage_type": "name"}, {"api_name": "sys.maxsize", "line_number": 188, "usage_type": "attribute"}, {"api_name": "sys.maxsize", "line_number": 194, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 210, "usage_type": "call"}, {"api_name": "numpy.inf", "line_number": 223, "usage_type": "attribute"}, {"api_name": "pandas.to_datetime", "line_number": 241, "usage_type": "call"}, {"api_name": "sys.float_info", "line_number": 271, "usage_type": "attribute"}, {"api_name": "pandas.Series", "line_number": 290, "usage_type": "call"}, {"api_name": "pandas.Series", "line_number": 295, "usage_type": "call"}, {"api_name": "sys.float_info", "line_number": 324, "usage_type": "attribute"}, {"api_name": "sys.float_info", "line_number": 325, "usage_type": "attribute"}, {"api_name": "numpy.inf", "line_number": 493, "usage_type": "attribute"}, {"api_name": "numpy.nan", "line_number": 493, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 590, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 653, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 654, "usage_type": "call"}, {"api_name": "sys.float_info", "line_number": 723, "usage_type": "attribute"}, {"api_name": "pyqtgraph.BarGraphItem", "line_number": 746, "usage_type": "attribute"}, {"api_name": "pyqtgraph.PlotDataItem", "line_number": 782, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QPicture", "line_number": 784, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 784, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QPainter", "line_number": 785, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 785, "usage_type": "name"}, {"api_name": "pyqtgraph.mkPen", "line_number": 789, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QPointF", 
"line_number": 792, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 792, "usage_type": "name"}, {"api_name": "pyqtgraph.BarGraphItem", "line_number": 802, "usage_type": "call"}, {"api_name": "pyqtgraph.PlotDataItem", "line_number": 807, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui.QPicture", "line_number": 824, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 824, "usage_type": "name"}, {"api_name": "pyqtgraph.QtGui.QPainter", "line_number": 825, "usage_type": "call"}, {"api_name": "pyqtgraph.QtGui", "line_number": 825, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 829, "usage_type": "call"}, {"api_name": "pyqtgraph.mkPen", "line_number": 838, "usage_type": "call"}, {"api_name": "pyqtgraph.mkBrush", "line_number": 839, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore.QPointF", "line_number": 852, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 852, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.QRectF", "line_number": 862, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 862, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.QPointF", "line_number": 864, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 864, "usage_type": "name"}, {"api_name": "pyqtgraph.QtCore.QPointF", "line_number": 865, "usage_type": "call"}, {"api_name": "pyqtgraph.QtCore", "line_number": 865, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 900, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 915, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 915, "usage_type": "attribute"}, {"api_name": "urlparse.urlparse", "line_number": 918, "usage_type": "call"}, {"api_name": "websocket.create_connection", "line_number": 924, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 932, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 948, "usage_type": "call"}]} +{"seq_id": "380220979", "text": "from django.conf.urls import url\n\nfrom .views import bloggTest,Hello,Contact,Product,HelloTemplate,ProductStatic,ProductDynamicOneRecord,Products\nfrom .views import BlogData\n\nfrom .views import Contact,Thanks\n\n\nfrom .views import Thanks,StudentInsert\n\nfrom .views import BlogPost,AboutUs\nfrom .views import Home\nurlpatterns = [\n\n url(r'^hello/',Hello),\n url(r'^contact/',Contact),\n url(r'^product/',Product),\n url(r'^hellotemplate/',HelloTemplate),\n url(r'^productstatic/',ProductStatic),\n url(r'^productdynamic/',ProductDynamicOneRecord),\n url(r'^products/',Products),\n\turl(r'^blogdata/',BlogData),\n\turl(r'^bloggtest/',bloggTest),\n url(r'^contact/', Contact, name='contact'),\n url(r'^thanks/',Thanks),\n\turl(r'^studentinsert/',StudentInsert),\n url(r'^postblog/', BlogPost, name='postblog'),\n url(r'^$', Home, name='home'),\n url(r'^aboutus/',AboutUs, name='aboutus'),\n]\n\n\n", "sub_path": "website/blogg/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.conf.urls.url", "line_number": 15, "usage_type": "call"}, {"api_name": "views.Hello", "line_number": 15, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 16, "usage_type": "call"}, {"api_name": "views.Contact", "line_number": 16, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 17, "usage_type": "call"}, {"api_name": 
"views.Product", "line_number": 17, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 18, "usage_type": "call"}, {"api_name": "views.HelloTemplate", "line_number": 18, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 19, "usage_type": "call"}, {"api_name": "views.ProductStatic", "line_number": 19, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 20, "usage_type": "call"}, {"api_name": "views.ProductDynamicOneRecord", "line_number": 20, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 21, "usage_type": "call"}, {"api_name": "views.Products", "line_number": 21, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 22, "usage_type": "call"}, {"api_name": "views.BlogData", "line_number": 22, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 23, "usage_type": "call"}, {"api_name": "views.bloggTest", "line_number": 23, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 24, "usage_type": "call"}, {"api_name": "views.Contact", "line_number": 24, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 25, "usage_type": "call"}, {"api_name": "views.Thanks", "line_number": 25, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 26, "usage_type": "call"}, {"api_name": "views.StudentInsert", "line_number": 26, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 27, "usage_type": "call"}, {"api_name": "views.BlogPost", "line_number": 27, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 28, "usage_type": "call"}, {"api_name": "views.Home", "line_number": 28, "usage_type": "argument"}, {"api_name": "django.conf.urls.url", "line_number": 29, "usage_type": "call"}, {"api_name": "views.AboutUs", "line_number": 29, "usage_type": "argument"}]} +{"seq_id": "596747332", "text": "from flask import Flask, request\nimport json\nimport save\napp = Flask(__name__)\n\n@app.route('/build', methods=['POST'])\ndef build():\n list = json.dumps(request.json)\n #returns listJson 1\n return list\n\n@app.route('/save', methods=['POST'])\ndef saveList():\n response = save.save(request.json)\n return \"ok\"\n\nif __name__ == '__main__':\n app.run()", "sub_path": "web.py", "file_name": "web.py", "file_ext": "py", "file_size_in_byte": 359, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Flask", "line_number": 4, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 8, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 8, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 8, "usage_type": "name"}, {"api_name": "save.save", "line_number": 14, "usage_type": "call"}, {"api_name": "flask.request.json", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}]} +{"seq_id": "388663841", "text": "# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nfrom scrapy.pipelines.images import ImagesPipeline, DropItem\nimport os\nfrom .settings import IMAGES_STORE\nimport scrapy\n\nclass CheaperPipeline(ImagesPipeline):\n def get_media_requests(self, item, info):\n # for image_url in item['image_urls']:\n 
yield scrapy.Request(item['image_urls'])\n\n def item_completed(self, results, item, info):\n image_paths = [x['path'] for ok, x in results if ok]\n if not image_paths:\n raise DropItem(\"Item contains no images\")\n # 重命名\n title = item['title']\n\n path = IMAGES_STORE + title + '/'\n if title != '' and os.path.exists(path) == False:\n os.mkdir(path)\n dir = path\n os.rename(IMAGES_STORE+image_paths[0],\n dir + item['index'] + '.jpg')\n item['image_paths'] = dir + item['index'] + '.jpg'\n return item\n", "sub_path": "Language/Python/spider/cheaper/cheaper/pipelines.py", "file_name": "pipelines.py", "file_ext": "py", "file_size_in_byte": 1061, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "scrapy.pipelines.images.ImagesPipeline", "line_number": 12, "usage_type": "name"}, {"api_name": "scrapy.Request", "line_number": 15, "usage_type": "call"}, {"api_name": "scrapy.pipelines.images.DropItem", "line_number": 20, "usage_type": "call"}, {"api_name": "settings.IMAGES_STORE", "line_number": 24, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 25, "usage_type": "call"}, {"api_name": "os.path", "line_number": 25, "usage_type": "attribute"}, {"api_name": "os.mkdir", "line_number": 26, "usage_type": "call"}, {"api_name": "os.rename", "line_number": 28, "usage_type": "call"}, {"api_name": "settings.IMAGES_STORE", "line_number": 28, "usage_type": "name"}]} +{"seq_id": "520149725", "text": "# coding:utf-8\nfrom RemoteCreditSystem import db\nfrom RemoteCreditSystem.config import PER_PAGE\nfrom RemoteCreditSystem.config import logger\nimport RemoteCreditSystem.helpers as helpers\nimport datetime\n\nfrom flask import Module, session, request, render_template, redirect, url_for, flash\nfrom flask.ext.login import current_user,login_required\n\nfrom RemoteCreditSystem.models import User\nfrom RemoteCreditSystem.models import Role\nfrom RemoteCreditSystem.models import UserRole\nfrom RemoteCreditSystem.models import Org\n\nfrom RemoteCreditSystem import app\nimport hashlib\n\nimport RemoteCreditSystem.tools.xmlUtil as xmlUtil\n\n# 机构管理\n@app.route('/System/org.page', methods=['GET'])\n@login_required\ndef System_org():\n orgs = Org.query.order_by(\"id\")\n return render_template(\"System/org/org.html\",orgs=orgs)\n \n# 加载树\n@app.route('/System/org/org.json', methods=['GET','POST'])\ndef init_org_tree():\n # 加载所有\n tree = Org.query.order_by(\"id\").all()\n for obj in tree:\n obj.icon = \"/static/img/icon_4.png\"\n return helpers.show_result_content(tree) # 返回json\n\n# 新增机构\n@app.route('/System/new_org.page/', methods=['GET'])\ndef new_org_page(pId):\n return render_template(\"System/org/new_org.html\",pId=pId)\n \n# 新增机构\n@app.route('/System/new_org.json/', methods=['POST'])\ndef new_org_json(pId):\n try:\n chk = Org.query.filter_by(org_name=request.form['org_name']).all()\n if(chk):\n # 消息闪现\n flash('保存失败,机构名重复','error')\n return redirect('System/org.page')\n \n levels = Org.query.filter_by(id=pId).first().levels + 1\n Org(request.form['org_name'],pId,levels).add()\n \n # 事务提交\n db.session.commit()\n # 消息闪现\n flash('保存成功','success')\n except:\n # 回滚\n db.session.rollback()\n logger.exception('exception')\n # 消息闪现\n flash('保存失败','error')\n finally:\n xmlUtil.updateDynDict('org_all')\n return redirect('System/org.page')\n\n# 编辑机构\n@app.route('/System/edit_org.page/', methods=['GET'])\ndef edit_org_page(id):\n org = Org.query.filter_by(id=id).first()\n return render_template(\"System/org/edit_org.html\",org=org)\n\n# 
编辑机构\n@app.route('/System/edit_org.json/', methods=['POST'])\ndef edit_org_json(id):\n try:\n chk = Org.query.filter(\"org_name='\"+request.form['org_name']+\"' and id<>\"+str(id)).all()\n if(chk):\n # 消息闪现\n flash('保存失败,机构名重复','error')\n return redirect('System/org.page')\n \n org = Org.query.filter_by(id=id).first()\n org.org_name = request.form['org_name']\n \n # 事务提交\n db.session.commit()\n # 消息闪现\n flash('保存成功','success')\n except:\n # 回滚\n db.session.rollback()\n logger.exception('exception')\n # 消息闪现\n flash('保存失败','error')\n finally:\n xmlUtil.updateDynDict('org_all')\n return redirect('System/org.page')\n \n# 删除机构\n@app.route('/System/delete_org.page/', methods=['GET'])\ndef delete_org_page(id):\n org = Org.query.filter_by(id=id).first()\n return render_template(\"System/org/edit_org.html\",org=org)", "sub_path": "RemoteCreditSystem/views/system/rcs_org.py", "file_name": "rcs_org.py", "file_ext": "py", "file_size_in_byte": 3377, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "RemoteCreditSystem.models.Org.query.order_by", "line_number": 25, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 25, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 25, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 26, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 22, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 22, "usage_type": "name"}, {"api_name": "flask.ext.login.login_required", "line_number": 23, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org.query.order_by", "line_number": 32, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 32, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 32, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.helpers.show_result_content", "line_number": 35, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.helpers", "line_number": 35, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 29, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 29, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 40, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 38, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 38, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter_by", "line_number": 46, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 46, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 49, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 50, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter_by", "line_number": 52, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 52, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 52, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 53, "usage_type": "call"}, 
{"api_name": "flask.request.form", "line_number": 53, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 53, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.db.session.commit", "line_number": 56, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session", "line_number": 56, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.db", "line_number": 56, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 58, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session.rollback", "line_number": 61, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session", "line_number": 61, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.db", "line_number": 61, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.config.logger.exception", "line_number": 62, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.config.logger", "line_number": 62, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 64, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.tools.xmlUtil.updateDynDict", "line_number": 66, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.tools.xmlUtil", "line_number": 66, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 67, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 43, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 43, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter_by", "line_number": 72, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 72, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 72, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 73, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 70, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 70, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter", "line_number": 79, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 79, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 79, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 79, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 83, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter_by", "line_number": 85, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 85, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 85, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 86, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 86, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.db.session.commit", "line_number": 89, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session", "line_number": 89, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.db", "line_number": 89, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 91, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session.rollback", "line_number": 94, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.db.session", "line_number": 94, "usage_type": 
"attribute"}, {"api_name": "RemoteCreditSystem.db", "line_number": 94, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.config.logger.exception", "line_number": 95, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.config.logger", "line_number": 95, "usage_type": "name"}, {"api_name": "flask.flash", "line_number": 97, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.tools.xmlUtil.updateDynDict", "line_number": 99, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.tools.xmlUtil", "line_number": 99, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 100, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 76, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 76, "usage_type": "name"}, {"api_name": "RemoteCreditSystem.models.Org.query.filter_by", "line_number": 105, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.models.Org.query", "line_number": 105, "usage_type": "attribute"}, {"api_name": "RemoteCreditSystem.models.Org", "line_number": 105, "usage_type": "name"}, {"api_name": "flask.render_template", "line_number": 106, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app.route", "line_number": 103, "usage_type": "call"}, {"api_name": "RemoteCreditSystem.app", "line_number": 103, "usage_type": "name"}]} +{"seq_id": "209585200", "text": "from requests import get\r\nfrom bs4 import BeautifulSoup\r\nfrom bs4 import SoupStrainer\r\nimport lxml\r\nimport html2text\r\nfrom sentiment import get_sentiment\r\n\r\ndef find_song(song_name, artist):\r\n token = \"Bearer token_here\"\r\n genius_search = \"http://api.genius.com/search?q=\" + \"%20\".join(song_name.split(\" \")) + \"%20\" + \"%20\".join(\r\n artist.split(\" \"))\r\n Header = {\r\n \"Authorization\": token\r\n }\r\n res = get(genius_search, headers=Header).json()\r\n return (res['response']['hits'][0]['result']['url'])\r\n\r\ndef get_lyrics(url):\r\n url = find_song(title, artist)\r\n only_div = SoupStrainer('div', {\"class\": \"lyrics\"})\r\n scrape = get(url).text\r\n soup = BeautifulSoup(scrape, 'lxml', parse_only=only_div)\r\n return (html2text.html2text(soup.text))\r\n\r\nlyrics = get_lyrics(find_song(title, artist))\r\n\r\nprint(get_sentiment(lyrics))", "sub_path": "rest/Genius_Scraper.py", "file_name": "Genius_Scraper.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "requests.get", "line_number": 15, "usage_type": "call"}, {"api_name": "bs4.SoupStrainer", "line_number": 20, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 21, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 22, "usage_type": "call"}, {"api_name": "html2text.html2text", "line_number": 23, "usage_type": "call"}, {"api_name": "sentiment.get_sentiment", "line_number": 27, "usage_type": "call"}]} +{"seq_id": "102836192", "text": "\"\"\"Field visualizer (polarizing miscroscope simulator)\"\"\"\n\nfrom __future__ import division, print_function, absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider\nfrom matplotlib.image import imsave\nimport scipy.ndimage as nd\n\n#from dtmm.project import projection_matrix, project\nfrom dtmm.color import load_tcmf, specter2color\nfrom dtmm.diffract import diffract, field_diffraction_matrix\nfrom dtmm.polarization import mode_polarizer, ray_polarizer, normal_polarizer\nfrom dtmm.field import 
field2specter\nfrom dtmm.wave import k0\nfrom dtmm.data import refind2eps\nfrom dtmm.conf import BETAMAX, CDTYPE\n\nfrom dtmm.linalg import dotmf, dotmm\nfrom dtmm.fft import fft2, ifft2\n\n#: settable viewer parameters\nVIEWER_PARAMETERS = (\"focus\",\"analyzer\", \"polarizer\", \"sample\", \"intensity\")\n\n\ndef _redim(a, ndim=1):\n \"\"\"Reshapes dimensions of input array by flattening over first few dimensions. If\n ndim is larger than input array ndim, it adds new axes to input array.\n \n >>> a = np.zeros((4,5,6,7))\n >>> _redim(a,ndim = 3).shape\n (20,6,7)\n >>> _redim(a,ndim = 5).shape\n (1,4,5,6,7)\n \n \"\"\"\n n = a.ndim - ndim \n old_shape = a.shape\n if n < 0:\n new_shape = (1,)*abs(n) + old_shape\n else:\n new_shape = (np.multiply.reduce(old_shape[0:n+1]),) + old_shape[n+1:]\n return a.reshape(new_shape)\n\ndef bulk_viewer(field_data, cmf = None, window = None, **parameters):\n \"\"\"Returns a FieldViewer object for optical microscope simulation\n \n Parameters\n ----------\n field_data : field data tuple\n Input field data\n cmf : str, ndarray or None, optional\n Color matching function (table). If provided as an array, it must match \n input field wavelengths. If provided as a string, it must match one of \n available CMF names or be a valid path to tabulated data. See load_tcmf.\n window : ndarray, optional\n Window function by which the calculated field is multiplied. This can \n be used for removing artefacts from the boundaries.\n parameters : kwargs, optional\n Extra parameters passed directly to the :meth:`FieldViewer.set_parameters`\n \n Returns\n -------\n out : viewer\n A :class:`BulkViewer` viewer object \n \n \"\"\" \n return field_viewer(field_data, cmf, bulk_data = True, window = window, **parameters)\n\ndef field_viewer(field_data, cmf = None, bulk_data = False, n = 1., mode = None,\n window = None, diffraction = True, polarization = \"normal\", betamax = BETAMAX, **parameters):\n \"\"\"Returns a FieldViewer object for optical microscope simulation\n \n Parameters\n ----------\n field_data : field data tuple\n Input field data\n cmf : str, ndarray or None, optional\n Color matching function (table). If provided as an array, it must match \n input field wavelengths. If provided as a string, it must match one of \n available CMF names or be a valid path to tabulated data. See load_tcmf.\n n : float, optional\n Refractive index of the output material.\n mode : [ 't' | 'r' | None], optional\n Viewer mode 't' for transmission mode, 'r' for reflection mode, None for\n as-is data (no projection calculation - default).\n window : ndarray, optional\n Window function by which the calculated field is multiplied. This can \n be used for removing artefacts from the boundaries.\n diffraction : bool, optional\n Specifies whether field is treated as diffractive field or not (if it\n was calculated by diffraction > 0 algorithm or not). If set to False\n refocusing is disabled.\n polarization : str, optional\n Defines polarization mode. That is, how the polarization of the light is\n treated after passing the analyzer. By default, polarizer is applied\n in real space (`normal`) which is good for normal (or mostly normal) \n incidence light. You can use `mode` instead of `normal` for more \n accurate, but slower computation. Here polarizers are applied to \n mode coefficients in fft space. \n betamax : float\n Betamax parameter used in the diffraction calculation function. With this
With this\n you can simulate finite NA of the microscope (NA = betamax).\n parameters : kwargs, optional\n Extra parameters passed directly to the :meth:`FieldViewer.set_parameters`\n \n Returns\n -------\n out : viewer\n A :class:`FieldViewer` or :class:`BulkViewer` viewer object \n \n \"\"\"\n field, wavelengths, pixelsize = field_data\n wavenumbers = k0(wavelengths, pixelsize)\n \n if diffraction == False and mode is not None:\n import warnings\n warnings.warn(\"Diffraction has been enabled because projection mode is set!\")\n diffraction = True\n pmodes = (\"mode\",\"normal\")\n if polarization not in pmodes :\n raise ValueError(\"Unknown polarization mode, should be one of {}\".format(repr(pmodes)))\n \n if cmf is None:\n cmf = load_tcmf(wavelengths)\n elif isinstance(cmf, str):\n cmf = load_tcmf(wavelengths, cmf = cmf)\n if bulk_data == False:\n if field.ndim < 4:\n raise ValueError(\"Incompatible field shape\")\n viewer = FieldViewer(field, wavenumbers, cmf, mode = mode, n = n,\n window = window, diffraction = diffraction, polarization = polarization, betamax = betamax)\n \n viewer.set_parameters(**parameters)\n else:\n if field.ndim < 5:\n raise ValueError(\"Incompatible field shape\")\n parameters.setdefault(\"focus\", 0)\n viewer = BulkViewer(field, wavenumbers, cmf, mode = mode, n = n,\n window = window, diffraction = diffraction, polarization = polarization, betamax = betamax)\n viewer.set_parameters(**parameters) \n return viewer\n\ndef _float_or_none(value):\n return float(value) if value is not None else None\n \n\nclass FieldViewer(object): \n \"\"\"Base viewer\"\"\" \n _updated_parameters = set()\n _focus = None\n _polarizer = None\n _sample = None\n _analyzer = None\n _intensity = 1.\n _parameters = VIEWER_PARAMETERS\n _fmin = 0\n _fmax = 100\n ofield = None\n gamma = True\n gray = False\n \n def __init__(self,field,ks,cmf, mode = None,n = 1., polarization = \"normal\",\n window = None, diffraction = True, betamax = BETAMAX):\n self.betamax = betamax\n self.diffraction = diffraction\n self.pmode = polarization\n self.mode = mode \n self.epsv = refind2eps([n,n,n])\n self.epsa = np.array([0.,0.,0.])\n self.ks = ks\n self.ifield = field \n self._ffield = None\n self.window = window\n self.cmf = cmf\n self.dmat = None\n \n @property\n def _default_fmin(self):\n return self.focus - 100\n \n @property\n def _default_fmax(self):\n return self.focus + 100 \n \n @property\n def focus(self):\n \"\"\"Focus position, relative to the calculated field position.\"\"\"\n return self._focus \n \n @property\n def ffield(self):\n if self._ffield is None:\n self._ffield = fft2(self.ifield)\n return self._ffield\n\n @focus.setter \n def focus(self, z):\n if self.diffraction == True or z is None:\n self._focus = _float_or_none(z)\n self._updated_parameters.add(\"focus\")\n else:\n raise ValueError(\"Cannot set focus of a non-diffractive field.\")\n\n @property\n def sample(self):\n \"\"\"Sample rotation angle.\"\"\"\n return self._sample\n \n @sample.setter \n def sample(self, angle):\n self._sample = _float_or_none(angle)\n self._updated_parameters.add(\"sample\") \n\n @property\n def polarizer(self):\n \"\"\"Polarizer rotation angle.\"\"\"\n return self._polarizer\n \n @polarizer.setter \n def polarizer(self, angle):\n if angle is not None and self.ifield.ndim >= 5 and self.ifield.shape[-5] != 2:\n raise ValueError(\"Cannot set polarizer. 
Incompatible field shape.\")\n self._polarizer = _float_or_none(angle)\n self._updated_parameters.add(\"polarizer\")\n \n @property\n def analyzer(self):\n \"\"\"Analyzer angle\"\"\"\n return self._analyzer \n \n @analyzer.setter \n def analyzer(self, angle):\n self._analyzer = _float_or_none(angle)\n self._updated_parameters.add(\"analyzer\")\n \n @property\n def intensity(self):\n \"\"\"Input light intensity\"\"\"\n return self._intensity \n \n @intensity.setter \n def intensity(self, intensity):\n self._intensity = _float_or_none(intensity)\n self._updated_parameters.add(\"intensity\")\n \n def set_parameters(self, **kwargs):\n \"\"\"Sets viewer parameters. Any of the :attr:`.VIEWER_PARAMETERS`\n \"\"\"\n for key, value in kwargs.items():\n if key in self._parameters:\n setattr(self, key, value) \n else:\n raise TypeError(\"Unexpected keyword argument '{}'\".format(key))\n def get_parameters(self):\n \"\"\"Returns viewer parameters as dict\"\"\"\n return {name : getattr(self,name) for name in VIEWER_PARAMETERS}\n \n def plot(self, ax = None, show_sliders = True, **kwargs):\n \"\"\"Plots field intensity profile. You can set any of the below listed\n arguments. Additionaly, you can set any argument that imshow of\n matplotlib uses (e.g. 'interpolation = \"sinc\"').\n \n Parameters\n ----------\n fmin : float, optional\n Minimimum value for focus setting.\n fmax : float, optional\n Maximum value for focus setting. \n imin : float, optional\n Minimimum value for intensity setting.\n imax : float, optional\n Maximum value for intensity setting. \n pmin : float, optional\n Minimimum value for polarizer angle.\n pmax : float, optional\n Maximum value for polarizer angle. \n smin : float, optional\n Minimimum value for sample rotation angle.\n smax : float, optional\n Maximum value for sample rotation angle. \n amin : float, optional\n Minimimum value for analyzer angle.\n amax : float, optional\n Maximum value for analyzer angle. 
\n \"\"\"\n\n self.fig = plt.figure() if ax is None else ax.figure\n self.ax = self.fig.add_subplot(111) if ax is None else ax\n \n plt.subplots_adjust(bottom=0.25) \n self.calculate_image()\n \n if show_sliders:\n\n def update_sample(d):\n self.sample = d\n self.update_plot()\n \n def update_focus(d):\n self.focus = d\n self.update_plot()\n \n def update_intensity(d):\n self.intensity = d\n self.update_plot()\n \n def update_analyzer(d):\n self.analyzer = d\n self.update_plot()\n \n def update_polarizer(d):\n self.polarizer = d\n self.update_plot()\n \n axes = [[0.25, 0.14, 0.65, 0.03],\n [0.25, 0.11, 0.65, 0.03],\n [0.25, 0.08, 0.65, 0.03],\n [0.25, 0.05, 0.65, 0.03],\n [0.25, 0.02, 0.65, 0.03]]\n \n if self.intensity is not None:\n self.axintensity = plt.axes(axes.pop())\n self._sintensity = Slider(self.axintensity, \"intensity\",kwargs.pop(\"imin\",0),kwargs.pop(\"imax\",max(10,self.intensity)),valinit = self.intensity, valfmt='%.1f')\n self._ids5 = self._sintensity.on_changed(update_intensity)\n if self.polarizer is not None:\n self.axpolarizer = plt.axes(axes.pop())\n self._spolarizer = Slider(self.axpolarizer, \"polarizer\",kwargs.pop(\"pmin\",0),kwargs.pop(\"pmax\",90),valinit = self.polarizer, valfmt='%.1f')\n self._ids4 = self._spolarizer.on_changed(update_polarizer) \n if self.sample is not None:\n self.axsample = plt.axes(axes.pop())\n self._ssample = Slider(self.axsample, \"sample\",kwargs.pop(\"smin\",-180),kwargs.pop(\"smax\",180),valinit = self.sample, valfmt='%.1f')\n self._ids3 = self._ssample.on_changed(update_sample) \n if self.analyzer is not None:\n self.axanalyzer = plt.axes(axes.pop())\n self._sanalyzer = Slider(self.axanalyzer, \"analyzer\",kwargs.pop(\"amin\",0),kwargs.pop(\"amax\",90),valinit = self.analyzer, valfmt='%.1f')\n self._ids2 = self._sanalyzer.on_changed(update_analyzer)\n if self.focus is not None: \n self.axfocus = plt.axes(axes.pop())\n self._sfocus = Slider(self.axfocus, \"focus\",kwargs.pop(\"fmin\",self._default_fmin),kwargs.pop(\"fmax\",self._default_fmax),valinit = self.focus, valfmt='%.1f')\n self._ids1 = self._sfocus.on_changed(update_focus)\n \n self.axim = self.ax.imshow(self.image, origin = kwargs.pop(\"origin\",\"lower\"), **kwargs)\n \n return self.ax.figure, self.ax\n\n def calculate_specter(self, recalc = False, **params):\n \"\"\"Calculates field specter.\n \n Parameters\n ----------\n recalc : bool, optional\n If specified, it forces recalculation. Otherwise, result is calculated\n only if calculation parameters have changed.\n params: kwargs, optional\n Any additional keyword arguments that are passed dirrectly to \n set_parameters method.\n \"\"\"\n if self.pmode == \"mode\":\n return self._calculate_specter_mode(recalc = recalc, **params)\n else:\n return self._calculate_specter_normal(recalc = recalc, **params)\n \n def _calculate_specter_normal(self, recalc = False, **params):\n \"\"\"Calculates field specter.\n \n Parameters\n ----------\n recalc : bool, optional\n If specified, it forces recalculation. 
Otherwise, result is calculated\n only if calculation parameters have changed.\n params: kwargs, optional\n Any additional keyword arguments that are passed dirrectly to \n set_parameters method.\n \"\"\"\n self.set_parameters(**params)\n if self.ofield is None:\n recalc = True #first time only trigger calculation \n if recalc or \"focus\" in self._updated_parameters:\n if self.diffraction == True or self.mode is not None:\n #if mode is selected, we need to project the filed using diffraction\n d = 0 if self.focus is None else self.focus\n dmat = field_diffraction_matrix(self.ifield.shape[-2:], self.ks, d = d, \n epsv = self.epsv, epsa = self.epsa, \n mode = self.mode, betamax = self.betamax)\n \n self.ofield = diffract(self.ifield,dmat,window = self.window,out = self.ofield)\n else:\n #no diffraction at all..\n if self.window is not None:\n self.ofield = self.ifield * self.window\n else:\n self.ofield = self.ifield.copy()\n recalc = True\n if recalc or \"polarizer\" in self._updated_parameters or \"analyzer\" in self._updated_parameters or \"sample\" in self._updated_parameters:\n sample = self.sample\n if sample is None:\n sample = 0.\n if self.polarizer is None:\n tmp = _redim(self.ofield, ndim = 5)\n out = np.empty_like(tmp[0])\n else:\n angle = -np.pi/180*(self.polarizer - sample)\n c,s = np.cos(angle),np.sin(angle) \n tmp = _redim(self.ofield, ndim = 6)\n out = np.empty_like(tmp[0,0])\n if self.analyzer is not None:\n \n angle = -np.pi/180*(self.analyzer - sample)\n #pmat = linear_polarizer(angle)\n pmat = normal_polarizer((np.cos(angle),np.sin(angle)))\n #pmat = ray_polarizer((np.cos(angle),np.sin(angle)),epsv = self.epsv, epsa = self.epsa)\n \n for i,data in enumerate(tmp):\n if self.polarizer is not None:\n x = data[0]*c\n y = np.multiply(data[1], s, out = out)\n ffield = np.add(x,y, out = out)#numexpr.evaluate(\"x*c+y*s\", out = out)\n else: \n ffield = data\n \n if self.analyzer is not None:\n #pfield = apply_jones_matrix(pmat, ffield, out = out)\n pfield = dotmf(pmat, ffield, out = out)\n else:\n pfield = ffield\n if i == 0:\n self.specter = field2specter(pfield) \n else:\n self.specter += field2specter(pfield) \n recalc = True\n \n if recalc or \"intensity\" in self._updated_parameters:\n self._updated_parameters.clear()\n self._updated_parameters.add(\"intensity\") #trigger calculate_image call\n else:\n self._updated_parameters.clear()\n return self.specter\n \n def _has_parameter_updated(self, *params):\n for p in params:\n if p in self._updated_parameters:\n return True\n return False\n\n def _calculate_specter_mode(self, recalc = False, **params):\n self.set_parameters(**params)\n if self.ofield is None:\n recalc = True #first time only trigger calculation \n if recalc or self._has_parameter_updated(\"sample\", \"polarizer\"):\n sample = self.sample if self.sample is not None else 0.\n if self.polarizer is not None:\n if self.ffield is None:\n self.ffield = fft2(self.ifield)\n angle = -np.pi/180*(self.polarizer - sample) \n c,s = np.cos(angle),np.sin(angle) \n \n self.data = _redim(self.ffield, ndim = 6)\n x = c*self.data[:,0]\n y = s*self.data[:,1]\n self.data = x+y\n else:\n self.data = _redim(self.ffield, ndim = 5)\n \n if recalc or self._has_parameter_updated(\"focus\"):\n if self.diffraction == True or self.mode is not None:\n #if mode is selected, we need to project the field using diffraction\n d = 0 if self.focus is None else self.focus\n self.dmat = field_diffraction_matrix(self.ifield.shape[-2:], self.ks, d = d, \n epsv = self.epsv, epsa = self.epsa, \n mode = 
self.mode, betamax = self.betamax)\n else:\n self.dmat = np.asarray(np.diag((1,1,1,1)), CDTYPE) \n if recalc or self._has_parameter_updated(\"analyzer\", \"sample\") :\n sample = self.sample if self.sample is not None else 0.\n if self.analyzer is not None:\n angle = -np.pi/180*(self.analyzer - sample)\n c,s = np.cos(angle),np.sin(angle) \n self.pmat = mode_polarizer(self.ifield.shape[-2:], self.ks, jones = (c,s),\n epsv = self.epsv, epsa = self.epsa, \n betamax = self.betamax) \n else:\n self.pmat = None\n\n if recalc or self._has_parameter_updated(\"analyzer\", \"sample\", \"polarizer\", \"focus\", \"intensity\") :\n tmat = None\n if self.pmat is not None and self.dmat is not None:\n tmat = dotmm(self.pmat,self.dmat)\n if self.pmat is None and self.dmat is not None:\n tmat = self.dmat\n if self.pmat is not None and self.dmat is None:\n tmat = self.pmat\n if tmat is not None:\n self.ofield = dotmf(tmat, self.data, out = self.ofield)\n self.ofield = ifft2(self.ofield, out = self.ofield)\n \n for i,data in enumerate(self.ofield):\n if i == 0:\n self.specter = field2specter(data) \n else:\n self.specter += field2specter(data) \n \n recalc = True\n\n \n if recalc or \"intensity\" in self._updated_parameters:\n self._updated_parameters.clear()\n self._updated_parameters.add(\"intensity\") #trigger calculate_image call\n else:\n self._updated_parameters.clear() \n \n return self.specter\n \n def calculate_image(self, recalc = False, **params):\n \"\"\"Calculates RGB image.\n \n Parameters\n ----------\n recalc : bool, optional\n If specified, it forces recalculation. Otherwise, result is calculated\n only if calculation parameters have changed.\n params: keyword arguments\n Any additional keyword arguments that are passed directly to \n set_parameters method.\n \n \"\"\" \n specter = self.calculate_specter(recalc,**params)\n if recalc or \"intensity\" in self._updated_parameters:\n if self.intensity is not None:\n if self.mode == \"r\":\n norm = -1./self.intensity\n else:\n norm = 1./self.intensity\n self.image = specter2color(specter,self.cmf, norm = norm, gamma = self.gamma, gray = self.gray) \n else:\n if self.mode == \"r\":\n self.image = specter2color(specter,self.cmf, norm = -1., gamma = self.gamma, gray = self.gray) \n else:\n self.image = specter2color(specter,self.cmf, gamma = self.gamma, gray = self.gray) \n if self.sample != 0 and self.sample is not None:\n self.image = nd.rotate(self.image, self.sample, reshape = False, order = 1) \n self._updated_parameters.clear()\n return self.image\n \n def save_image(self, fname, origin = \"lower\", **kwargs):\n \"\"\"Calculates and saves image to file using matplotlib.image.imsave.\n \n Parameters\n ----------\n fname : str\n Output filename or file object.\n origin : [ 'upper' | 'lower' ]\n Indicates whether the (0, 0) index of the array is in the upper left \n or lower left corner of the axes. 
Defaults to 'lower' \n kwargs : optional\n Any extra keyword argument that is supported by matplotlib.image.imsave\n \"\"\"\n im = self.calculate_image()\n imsave(fname, im, origin = origin, **kwargs)\n\n def update_plot(self):\n \"\"\"Triggers plot redraw\"\"\"\n self.calculate_image()\n self.axim.set_data(self.image)\n self.fig.canvas.draw_idle() \n \n def show(self):\n \"\"\"Shows plot\"\"\"\n plt.show()\n\n \nclass BulkViewer(FieldViewer):\n @property\n def _default_fmin(self):\n return 0\n \n @property\n def _default_fmax(self):\n return len(self.ifield) -1 \n\n @property\n def ffield(self):\n if self._ffield is None:\n self._ffield = fft2(self.ifield[self.focus])\n return self._ffield\n\n @ffield.setter\n def ffield(self, value):\n self._ffield = value\n \n @property\n def focus(self):\n \"\"\"Focus position, relative to the calculated field position.\"\"\"\n return self._focus \n \n @focus.setter \n def focus(self, z):\n self._focus = int(z)\n self._updated_parameters.add(\"focus\")\n \n def _calculate_specter_mode(self, recalc = False, **params):\n self.set_parameters(**params)\n\n if self.ofield is None:\n recalc = True #first time only trigger calculation \n \n if recalc or self._has_parameter_updated(\"analyzer\", \"sample\") :\n sample = self.sample if self.sample is not None else 0.\n if self.analyzer is not None:\n angle = -np.pi/180*(self.analyzer - sample)\n c,s = np.cos(angle),np.sin(angle) \n self.pmat = mode_polarizer(self.ifield.shape[-2:], self.ks, jones = (c,s),\n epsv = self.epsv, epsa = self.epsa, \n betamax = self.betamax) \n if self.pmode != \"mode\":\n self.pmat = self.pmat[...,0:1,0:1,:,:]\n\n\n if recalc or self._has_parameter_updated(\"focus\"):\n if self.mode is None:\n self.ffield = fft2(self.ifield[self.focus])\n else:\n self.dmat = field_diffraction_matrix(self.ifield.shape[-2:], self.ks, d = 0, \n epsv = self.epsv, epsa = self.epsa, \n mode = self.mode, betamax = self.betamax)\n self.ffield = fft2(self.ifield[self.focus])\n\n recalc = True #trigger update of self.data\n\n if recalc or self._has_parameter_updated(\"sample\", \"polarizer\"):\n sample = self.sample if self.sample is not None else 0.\n if self.polarizer is not None:\n angle = -np.pi/180*(self.polarizer - sample) \n c,s = np.cos(angle),np.sin(angle) \n \n self.data = _redim(self.ffield, ndim = 6)\n x = c*self.data[:,0]\n y = s*self.data[:,1]\n self.data = x+y\n else:\n self.data = _redim(self.ffield, ndim = 5)\n \n\n if recalc or self._has_parameter_updated(\"analyzer\", \"sample\", \"polarizer\", \"focus\", \"intensity\") :\n if self.dmat is not None:\n pmat = dotmm(self.pmat,self.dmat)\n else:\n pmat = self.pmat\n self.ofield = dotmf(pmat, self.data, out = self.ofield)\n self.ofield = ifft2(self.ofield, out = self.ofield)\n \n for i,data in enumerate(self.ofield):\n if i == 0:\n self.specter = field2specter(data) \n else:\n self.specter += field2specter(data) \n recalc = True\n\n \n if recalc or \"intensity\" in self._updated_parameters:\n self._updated_parameters.clear()\n self._updated_parameters.add(\"intensity\") #trigger calculate_image call\n else:\n self._updated_parameters.clear() \n \n return self.specter\n \n \n def _calculate_specter_normal(self, recalc = False, **params):\n self.set_parameters(**params)\n if self.ofield is None:\n recalc = True #first time only trigger calculation \n if recalc or \"focus\" in self._updated_parameters:\n if self.mode is None:\n self.ofield = self.ifield[self.focus]\n else:\n dmat = field_diffraction_matrix(self.ifield.shape[-2:], self.ks, d = 0, 
\n epsv = self.epsv, epsa = self.epsa, \n mode = self.mode, betamax = self.betamax)\n self.ofield = diffract(self.ifield[self.focus],dmat,window = self.window,out = self.ofield)\n recalc = True\n\n if recalc or \"polarizer\" in self._updated_parameters or \"analyzer\" in self._updated_parameters or \"sample\" in self._updated_parameters:\n sample = self.sample\n if sample is None:\n sample = 0.\n if self.polarizer is None:\n tmp = _redim(self.ofield, ndim = 5)\n out = np.empty_like(tmp[0])\n else:\n angle = -np.pi/180*(self.polarizer - sample)\n c,s = np.cos(angle),np.sin(angle) \n tmp = _redim(self.ofield, ndim = 6)\n out = np.empty_like(tmp[0,0])\n if self.analyzer is not None:\n angle = -np.pi/180*(self.analyzer - sample)\n #pmat = linear_polarizer(angle)\n pmat = ray_polarizer((np.cos(angle),np.sin(angle)),epsv = self.epsv, epsa = self.epsa)\n \n \n for i,data in enumerate(tmp):\n if self.polarizer is not None:\n x = data[0]*c\n y = np.multiply(data[1], s, out = out)\n ffield = np.add(x,y, out = out)#numexpr.evaluate(\"x*c+y*s\", out = out)\n else: \n ffield = data\n \n if self.analyzer is not None:\n pfield = dotmf(pmat, ffield, out = out)\n else:\n pfield = ffield\n if i == 0:\n self.specter = field2specter(pfield) \n else:\n self.specter += field2specter(pfield) \n recalc = True\n \n if recalc or \"intensity\" in self._updated_parameters:\n self._updated_parameters.clear()\n self._updated_parameters.add(\"intensity\") #trigger calculate_image call\n else:\n self._updated_parameters.clear()\n return self.specter\n \n\n \n\n ", "sub_path": "dtmm/field_viewer.py", "file_name": "field_viewer.py", "file_ext": "py", "file_size_in_byte": 29584, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "numpy.multiply.reduce", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 43, "usage_type": "attribute"}, {"api_name": "dtmm.conf.BETAMAX", "line_number": 72, "usage_type": "name"}, {"api_name": "dtmm.wave.k0", "line_number": 115, "usage_type": "call"}, {"api_name": "warnings.warn", "line_number": 119, "usage_type": "call"}, {"api_name": "dtmm.color.load_tcmf", "line_number": 126, "usage_type": "call"}, {"api_name": "dtmm.color.load_tcmf", "line_number": 128, "usage_type": "call"}, {"api_name": "dtmm.conf.BETAMAX", "line_number": 165, "usage_type": "name"}, {"api_name": "dtmm.data.refind2eps", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 171, "usage_type": "call"}, {"api_name": "dtmm.fft.fft2", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 289, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 289, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 292, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 292, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 324, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 324, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 325, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 328, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 328, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 329, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 332, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 332, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 333, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 336, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 336, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 337, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.axes", "line_number": 340, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 340, "usage_type": "name"}, {"api_name": "matplotlib.widgets.Slider", "line_number": 341, "usage_type": "call"}, {"api_name": "dtmm.diffract.field_diffraction_matrix", "line_number": 384, "usage_type": "call"}, {"api_name": "dtmm.diffract.diffract", "line_number": 388, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 402, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 404, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 405, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 407, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 410, "usage_type": "attribute"}, {"api_name": "dtmm.polarization.normal_polarizer", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 412, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 418, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 419, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmf", "line_number": 425, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 429, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 431, "usage_type": "call"}, {"api_name": "dtmm.fft.fft2", "line_number": 455, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 456, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 457, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 457, "usage_type": "call"}, {"api_name": "dtmm.diffract.field_diffraction_matrix", "line_number": 470, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 474, "usage_type": "call"}, {"api_name": "dtmm.conf.CDTYPE", "line_number": 474, "usage_type": "argument"}, {"api_name": "numpy.diag", "line_number": 474, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 478, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 479, "usage_type": "call"}, {"api_name": "dtmm.polarization.mode_polarizer", "line_number": 480, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmm", "line_number": 489, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmf", "line_number": 495, "usage_type": "call"}, {"api_name": "dtmm.fft.ifft2", "line_number": 496, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 500, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 502, "usage_type": "call"}, {"api_name": "dtmm.color.specter2color", "line_number": 535, "usage_type": "call"}, {"api_name": "dtmm.color.specter2color", "line_number": 538, "usage_type": "call"}, {"api_name": "dtmm.color.specter2color", "line_number": 540, "usage_type": "call"}, {"api_name": "scipy.ndimage.rotate", "line_number": 542, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 
542, "usage_type": "name"}, {"api_name": "matplotlib.image.imsave", "line_number": 560, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 570, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 570, "usage_type": "name"}, {"api_name": "dtmm.fft.fft2", "line_number": 585, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 611, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 612, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 612, "usage_type": "call"}, {"api_name": "dtmm.polarization.mode_polarizer", "line_number": 613, "usage_type": "call"}, {"api_name": "dtmm.fft.fft2", "line_number": 622, "usage_type": "call"}, {"api_name": "dtmm.diffract.field_diffraction_matrix", "line_number": 624, "usage_type": "call"}, {"api_name": "dtmm.fft.fft2", "line_number": 627, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 634, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 635, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 635, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmm", "line_number": 647, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmf", "line_number": 650, "usage_type": "call"}, {"api_name": "dtmm.fft.ifft2", "line_number": 651, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 655, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 657, "usage_type": "call"}, {"api_name": "dtmm.diffract.field_diffraction_matrix", "line_number": 678, "usage_type": "call"}, {"api_name": "dtmm.diffract.diffract", "line_number": 681, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 690, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 692, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 693, "usage_type": "call"}, {"api_name": "numpy.empty_like", "line_number": 695, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 697, "usage_type": "attribute"}, {"api_name": "dtmm.polarization.ray_polarizer", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 699, "usage_type": "call"}, {"api_name": "numpy.multiply", "line_number": 705, "usage_type": "call"}, {"api_name": "numpy.add", "line_number": 706, "usage_type": "call"}, {"api_name": "dtmm.linalg.dotmf", "line_number": 711, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 715, "usage_type": "call"}, {"api_name": "dtmm.field.field2specter", "line_number": 717, "usage_type": "call"}]} +{"seq_id": "468595220", "text": "import pygame\nimport random\nfrom colors import *\nfrom directions import *\n\n\nclass Ball(pygame.sprite.Sprite):\n\n def __init__(self, screen, width, height):\n super().__init__()\n\n self.width, self.height = width, height\n self.direction = random.choice([Directions.DOWN_LEFT, Directions.DOWN_RIGHT, Directions.UP_LEFT, Directions.UP_RIGHT])\n self.screen = screen\n self.image = pygame.Surface([10, 10])\n self.image.fill(WHITE)\n pygame.draw.rect(self.image, ORANGE, [0, 0, 10, 10])\n self.rect = self.image.get_rect()\n self.position = (width / 2 + 2, height / + 2)\n self.hits = 0\n self.speed_up = 1.0\n\n def draw(self, screen):\n screen.blit(self.image, self.rect)\n\n def hit(self):\n self.hits += 1\n self.speed_up = 1.0+self.hits/10\n\n @property\n def position(self):\n 
return (self.rect.x, self.rect.y)\n\n @position.setter\n def position(self, pos):\n try:\n pos_x, pos_y = pos\n except ValueError:\n raise ValueError(\"Pass an iterable with two items\")\n else:\n self.rect.x, self.rect.y = pos_x, pos_y\n\n def up_left(self):\n self.position = (self.position[0] - 10*self.speed_up, self.position[1] - 10*self.speed_up)\n\n def up_right(self):\n\n self.position = (self.position[0] + 10*self.speed_up, self.position[1] - 10*self.speed_up)\n\n def down_left(self):\n\n self.position = (self.position[0] - 10*self.speed_up, self.position[1] + 10*self.speed_up)\n\n def down_right(self):\n\n self.position = (self.position[0] + 10*self.speed_up, self.position[1] + 10*self.speed_up)\n\n def update(self):\n if self.position[1] <= 10: # upper border\n self.direction = random.choice(\n [Directions.DOWN_LEFT, Directions.DOWN_RIGHT])\n if self.position[1] >= self.height - 10: # bottom border\n self.direction = random.choice(\n [Directions.UP_LEFT, Directions.UP_RIGHT])\n\n options = {Directions.UP_LEFT: self.up_left,\n Directions.UP_RIGHT: self.up_right,\n Directions.DOWN_LEFT: self.down_left,\n Directions.DOWN_RIGHT: self.down_right,\n }\n options[self.direction]()\n\n def toggle_direction(self):\n if self.direction == Directions.DOWN_LEFT:\n new_direction = Directions.DOWN_RIGHT\n\n if self.direction == Directions.DOWN_RIGHT:\n new_direction = Directions.DOWN_LEFT\n\n if self.direction == Directions.UP_RIGHT:\n new_direction = Directions.UP_LEFT\n\n if self.direction == Directions.UP_LEFT:\n new_direction = Directions.UP_RIGHT\n\n try:\n self.direction = new_direction\n except NameError:\n pass\n\n def get_x_val(self):\n return self.rect.x", "sub_path": "ball.py", "file_name": "ball.py", "file_ext": "py", "file_size_in_byte": 2878, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pygame.sprite", "line_number": 7, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 13, "usage_type": "call"}, {"api_name": "pygame.Surface", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.draw.rect", "line_number": 17, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 17, "usage_type": "attribute"}, {"api_name": "random.choice", "line_number": 60, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 63, "usage_type": "call"}]} +{"seq_id": "552571597", "text": "\"\"\"Script to reproduce Fig. 6 of Connor et al. 
2017.\n\nPlot N(>S) over log S (S being the flux density) for various spectral indices.\n\"\"\"\nimport copy\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom scipy.signal import savgol_filter\n\nfrom frbpoppy import CosmicPopulation, Survey, SurveyPopulation, unpickle\n\nCREATE = False\nOBSERVE = False\nSIS = (-2, 0, 2) # Spectral indices\n\npop = {}\n\nif CREATE:\n days = 14\n n_per_day = 5000\n n_tot = n_per_day*days\n\n for si in SIS:\n\n if si == min(SIS):\n\n pop[si] = CosmicPopulation(n_tot,\n days=days,\n name=f'si-{si}',\n dm_host_model='normal',\n dm_host_mu=0,\n dm_host_sigma=0,\n dm_igm_index=1200,\n dm_igm_sigma=0,\n dm_mw_model='zero',\n emission_range=[10e6, 10e9],\n lum_range=[1e40, 1e40],\n lum_index=0,\n n_model='vol_co',\n pulse_model='uniform',\n pulse_range=[1., 1.],\n pulse_mu=1.,\n pulse_sigma=0.,\n repeat=0.,\n si_mu=si,\n si_sigma=0.,\n z_max=2.5)\n pop[si].save()\n\n else:\n pop[si] = copy.deepcopy(pop[min(SIS)])\n pop[si].frbs.si = np.random.normal(si, 0, n_tot)\n pop[si].name = f'si-{si}'\n pop[si].save()\n\npop_obs = {}\n\nif OBSERVE or CREATE:\n\n for si in SIS:\n\n if not CREATE:\n pop[si] = unpickle(f'si-{si}')\n\n # Create Survey\n perfect = Survey('perfect', gain_pattern='perfect')\n\n # Observe populations\n pop_obs[si] = SurveyPopulation(pop[si], perfect)\n pop_obs[si].name = f'si-{si}-obs'\n pop_obs[si].rates()\n pop_obs[si].save()\n\nelse:\n for si in SIS:\n pop_obs[si] = unpickle(f'si-{si}-obs')\n\n\n# Plot log N and alpha versus log S\nf, (ax1, ax2) = plt.subplots(2, 1, sharex=True)\n\nmin_s = 1e99\nmax_s = -1e99\n\nfor si in SIS:\n\n pop = pop_obs[si]\n\n s_peak = pop.frbs.s_peak\n\n # Bin up\n number, bins = np.histogram(np.log10(s_peak), bins=500) # N(S)\n n_gt_s = np.cumsum(number[::-1])[::-1] # N(>S) from N(S)\n x = bins[:-1] # log(S)\n y = np.log10(n_gt_s) # log(N(>S))\n\n ax1.step(x, y, where='pre', label=fr\"$\\gamma$ of {si}\")\n\n # Plot alpha\n # Calculate derivative\n der = np.diff(y) / np.diff(x)\n bin_centres = (x[:-1] + x[1:]) / 2\n\n # Smooth function\n derhat = savgol_filter(der, 51, 3)\n ax2.step(bin_centres, derhat, where='mid')\n\n if min(bin_centres) <= min_s:\n min_s = min(bin_centres)\n if max(bin_centres) >= max_s:\n max_s = max(bin_centres)\n\n# Add a -3/2 slope\nx = np.linspace(min_s, max_s, 1000)\ny = -1.5*x\ny -= min(y)\ny += min(np.log10(n_gt_s))\nx = x[y <= max(np.log10(n_gt_s))]\ny = y[y <= max(np.log10(n_gt_s))]\nax1.step(x, y, where='mid', color='grey', alpha=0.5)\n\n# Plot alpha over log S\n\n# Plot a Euclidean line\nx = np.linspace(min_s, max_s, 1000)\ny = np.ones_like(x) * -1.5\nax2.step(x, y, where='mid', color='grey', alpha=0.5)\n\n\nax1.set_ylabel(r'log N(>S$_{\\text{peak}}$)')\nax1.legend()\nax2.set_xlabel(r'log S$_{\\text{peak}}$')\nax2.set_ylabel(r'$\\alpha$')\nax2.set_ylim(ax2.get_ylim()[::-1])\n\nplt.tight_layout()\nplt.savefig(f'plots/logn_logs_si.pdf')\n", "sub_path": "tests/logn_logs_spectral_index.py", "file_name": "logn_logs_spectral_index.py", "file_ext": "py", "file_size_in_byte": 3814, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "frbpoppy.CosmicPopulation", "line_number": 28, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random.normal", "line_number": 53, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 53, "usage_type": "attribute"}, {"api_name": "frbpoppy.unpickle", "line_number": 64, "usage_type": "call"}, 
{"api_name": "frbpoppy.Survey", "line_number": 67, "usage_type": "call"}, {"api_name": "frbpoppy.SurveyPopulation", "line_number": 70, "usage_type": "call"}, {"api_name": "frbpoppy.unpickle", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 81, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 81, "usage_type": "name"}, {"api_name": "numpy.histogram", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.diff", "line_number": 102, "usage_type": "call"}, {"api_name": "scipy.signal.savgol_filter", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 118, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 119, "usage_type": "call"}, {"api_name": "numpy.log10", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 126, "usage_type": "call"}, {"api_name": "numpy.ones_like", "line_number": 127, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}]} +{"seq_id": "110553004", "text": "from datetime import datetime\n\nfrom app.db import DBSession\nfrom .base_checker import BaseEngineStatusChecker, EngineStatus\nfrom const.query_execution import QueryEngineStatus\nfrom lib.query_executor.base_executor import QueryExecutorBaseClass\nfrom lib.query_executor.all_executors import get_executor_class\n\nfrom lib.utils.utils import Timeout\nfrom logic.admin import get_query_engine_by_id\n\n\nclass ConnectionChecker(BaseEngineStatusChecker):\n @classmethod\n def NAME(cls) -> str:\n return \"ConnectionChecker\"\n\n @classmethod\n def _perform_check(cls, engine_id: int) -> EngineStatus:\n with DBSession() as session:\n engine = get_query_engine_by_id(engine_id, session=session)\n executor_params = engine.get_engine_params()\n\n return check_connection(\n get_executor_class(engine.language, engine.executor), executor_params\n )\n\n\ndef check_connection(\n executor: QueryExecutorBaseClass, client_settings: {}\n) -> EngineStatus:\n result: EngineStatus = {\"status\": QueryEngineStatus.GOOD.value, \"messages\": []}\n try:\n with Timeout(20, \"Connection took too long\"):\n cursor = executor._get_client(client_settings).cursor()\n utc_now_str = datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\n result[\"messages\"].append(\n f\"Connection check successed at {utc_now_str} UTC\"\n )\n del cursor\n except Exception as e:\n result[\"status\"] = QueryEngineStatus.ERROR.value\n result[\"messages\"].append(str(e))\n\n return result\n", "sub_path": "querybook/server/lib/engine_status_checker/connection_checker.py", "file_name": "connection_checker.py", "file_ext": "py", "file_size_in_byte": 1583, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "base_checker.BaseEngineStatusChecker", "line_number": 13, "usage_type": "name"}, {"api_name": "app.db.DBSession", "line_number": 20, "usage_type": "call"}, {"api_name": 
"logic.admin.get_query_engine_by_id", "line_number": 21, "usage_type": "call"}, {"api_name": "lib.query_executor.all_executors.get_executor_class", "line_number": 25, "usage_type": "call"}, {"api_name": "base_checker.EngineStatus", "line_number": 19, "usage_type": "name"}, {"api_name": "lib.query_executor.base_executor.QueryExecutorBaseClass", "line_number": 30, "usage_type": "name"}, {"api_name": "base_checker.EngineStatus", "line_number": 32, "usage_type": "name"}, {"api_name": "const.query_execution.QueryEngineStatus.GOOD", "line_number": 32, "usage_type": "attribute"}, {"api_name": "const.query_execution.QueryEngineStatus", "line_number": 32, "usage_type": "name"}, {"api_name": "lib.utils.utils.Timeout", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 36, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "const.query_execution.QueryEngineStatus.ERROR", "line_number": 42, "usage_type": "attribute"}, {"api_name": "const.query_execution.QueryEngineStatus", "line_number": 42, "usage_type": "name"}, {"api_name": "base_checker.EngineStatus", "line_number": 31, "usage_type": "name"}]} +{"seq_id": "625944184", "text": "import Interface as it\r\nfrom Modelisation import Modele\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom PyQt5.QtCore import Qt\r\nfrom JoueurHumain import JoueurHumain\r\nfrom JoueurOrdi import JoueurOrdi\r\nfrom PyQt5 import QtTest\r\nfrom Interface import GUI_Kalaha\r\n\r\nclass Jeu():\r\n \"\"\"\r\n Classe principale reliant les classes entre elles\r\n \"\"\"\r\n def __init__(self, gui, j2):\r\n \r\n self.gui = gui\r\n self.modelisation = Modele()\r\n self.joueur = 1\r\n self.Joueur1 = JoueurHumain()\r\n self.Joueur2 = j2\r\n self.playing = True\r\n self.wait = False\r\n print(self.gui.jeu)\r\n \r\n self.gui.jeu = self\r\n \r\n print(\"ca va\")\r\n\r\n def reception_clic(self, col, ligne):\r\n \"\"\"\r\n Commande à effectuer lorsqu'un clic est réalisé sur une des zones de jeu\r\n\r\n :param col: colonne du clic\r\n :param ligne: ligne du clic\r\n :return:\r\n \"\"\"\r\n\r\n #Seul un joueur humain peut cliquer on ne valide donc le passage que si c'est au tour d'un joueur humain\r\n if (self.joueur == 1 and self.Joueur1.humain) or (self.joueur == 2 and self.Joueur2.humain) and self.playing:\r\n self.deplacer(col, ligne)\r\n\r\n def choix_clic(self, col):\r\n \"\"\"\r\n Commande à effectuer dans le cas d'un choix d'ordinateur\r\n\r\n :param col: Colonne désiré\r\n :return:\r\n \"\"\"\r\n # L'ordinateur envoie seulement la colonne et selon sa place on prend la ligne selon sa place\r\n if self.joueur == 1:\r\n self.deplacer(col, 1)\r\n else :\r\n self.deplacer(col, 0)\r\n\r\n\r\n\r\n def deplacer(self, col, ligne):\r\n \"\"\"\r\n Fonction qui accomplit le déplacement d'une case et des étapes de vérification qui suivent\r\n\r\n :param col: colonne où jouer\r\n :param ligne: ligne où jouer\r\n :return:\r\n \"\"\"\r\n\r\n #La variable choix permet de visualiser le choix de case si un ordi joue\r\n #Il vaut -2 si il n'a pas été encore réalisé et -1 si c'est à un humain de jouer\r\n choix = -2\r\n if ligne == 0:\r\n # L'indexation est en sens inverse sur la ligne supérieur on corrige donc cela\r\n col = 5 - col\r\n\r\n # On obtient alors la position de la case dans la liste\r\n indice = col + ligne * 7\r\n #On vérifie que l'on a le droit de jouer\r\n #Le coup doit être sur la bonne ligne\r\n #La case ne doit pas être vide\r\n #Le rendu doit être 
if ((self.joueur == 1 and ligne == 1) or (self.joueur == 2 and ligne == 0)) and self.modelisation.plateau[indice] != 0 and not self.wait:\r\n self.wait = True\r\n retour = self.modelisation.jouer(indice)\r\n self.modelisation.test_dernier(retour, self.joueur)\r\n self.playing = self.modelisation.fin()\r\n self.gui.MAJ_val(self.modelisation.plateau, indice)\r\n \r\n while self.gui.rendering:\r\n pass\r\n QtTest.QTest.qWait(2000)\r\n #If these conditions hold, play proceeds normally; otherwise the player must play again\r\n if ((self.joueur == 1 and retour != 13) or (self.joueur == 2 and retour != 6)) and self.playing:\r\n if self.joueur == 1:\r\n self.joueur = 2\r\n choix = self.Joueur2.jouer(self.modelisation.plateau.copy(), self.joueur)\r\n else:\r\n self.joueur = 1\r\n choix = self.Joueur1.jouer(self.modelisation.plateau.copy(), self.joueur)\r\n self.wait = False\r\n #Only entered if no choice has been made yet\r\n if self.joueur == 1 and choix == -2 and self.playing:\r\n choix = self.Joueur1.jouer(self.modelisation.plateau.copy(), self.joueur)\r\n elif choix == -2 and self.playing:\r\n choix = self.Joueur2.jouer(self.modelisation.plateau.copy(), self.joueur)\r\n \r\n \r\n if choix != -1 and self.playing:\r\n self.choix_clic(choix)\r\n if not self.playing:\r\n \r\n \r\n score = [self.modelisation.plateau[13], self.modelisation.plateau[6]]\r\n #self.gui.endgame(score, self.Joueur2)\r\n \r\nif __name__ == \"__main__\":\r\n app = QApplication([])\r\n\r\n gui = GUI_Kalaha()\r\n jeu = Jeu(gui, JoueurHumain())\r\n gui.show()\r\n\r\n r = app.exec()\r\n \r\n \r\n ", "sub_path": "Kalaha final/Jeu.py", "file_name": "Jeu.py", "file_ext": "py", "file_size_in_byte": 4435, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "Modelisation.Modele", "line_number": 18, "usage_type": "call"}, {"api_name": "JoueurHumain.JoueurHumain", "line_number": 20, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest.qWait", "line_number": 89, "usage_type": "call"}, {"api_name": "PyQt5.QtTest.QTest", "line_number": 89, "usage_type": "attribute"}, {"api_name": "PyQt5.QtTest", "line_number": 89, "usage_type": "name"}, {"api_name": "Interface.GUI_Kalaha", "line_number": 117, "usage_type": "call"}, {"api_name": "JoueurHumain.JoueurHumain", "line_number": 118, "usage_type": "call"}]} +{"seq_id": "38064828", "text": "\"\"\"empty message\n\nRevision ID: 5c1e58a63727\nRevises: 6c1fa3d9287d\nCreate Date: 2020-04-04 11:51:55.696141\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5c1e58a63727'\ndown_revision = '6c1fa3d9287d'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('floor_loading',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('loading_type', sa.String(), nullable=True),\n sa.Column('loading', sa.String(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_table('loading')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('loading',\n sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False),\n sa.Column('loading_type', sa.VARCHAR(length=50), autoincrement=False, nullable=True),\n sa.Column('loading', sa.VARCHAR(), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id', name='loading_pkey')\n )\n op.drop_table('floor_loading')\n # ### end Alembic commands ###\n", "sub_path": "migrations/versions/5c1e58a63727_.py", "file_name": "5c1e58a63727_.py", "file_ext": "py", "file_size_in_byte": 1168, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "alembic.op.create_table", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Integer", "line_number": 22, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 23, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 24, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 25, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 27, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 27, "usage_type": "name"}, {"api_name": "alembic.op.create_table", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 34, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 35, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.VARCHAR", "line_number": 36, "usage_type": "call"}, {"api_name": "sqlalchemy.PrimaryKeyConstraint", "line_number": 37, "usage_type": "call"}, {"api_name": "alembic.op.drop_table", "line_number": 39, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 39, "usage_type": "name"}]} +{"seq_id": "407028444", "text": "from PyPDF2 import PdfFileMerger, PdfFileReader, PdfFileWriter\nimport os\nimport glob\n\nclass MergeAllPDF:\n def __init__(self):\n self.mergelist = []\n\n def create(self, filepath, outpath, outfilename):\n self.outfilname = outfilename\n self.filepath = filepath\n self.outpath = outpath\n self.pdfs = glob.glob(self.filepath)\n self.myrange = len(self.pdfs)\n\n for _ in range(self.myrange):\n if self.pdfs:\n self.mergelist.append(self.pdfs.pop(0))\n self.merge()\n\n def merge(self):\n if self.mergelist:\n self.merger = PdfFileMerger()\n for pdf in self.mergelist:\n self.merger.append(open(pdf, 'rb')) \n self.merger.write(self.outpath + \"%s.pdf\" % (self.outfilname))\n self.merger.close()\n self.mergelist = []\n else:\n print(\"mergelist is empty please check your input path\")\n\n# example how to use\n#update your path here:\n\n\ninpath = r\"/Users/liqiao/Desktop/new/*\" #here are your single page pdfs stored\noutpath = r\"/Users/liqiao/Desktop/new\" #here your merged pdf will be stored\n\nb = MergeAllPDF()\nb.create(inpath, outpath, \"mergedpdf\")\n", "sub_path": "mergePDF-version2.py", "file_name": "mergePDF-version2.py", "file_ext": "py", "file_size_in_byte": 1206, 
"program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "glob.glob", "line_number": 13, "usage_type": "call"}, {"api_name": "PyPDF2.PdfFileMerger", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "348065977", "text": "from os import listdir\nfrom os import makedirs\nfrom os.path import isfile, join\nimport json\n\nclass SlicedDataFormatter:\n\t\"\"\"docstring for SlicedDataFormatter\"\"\"\n\tdef __init__(self, folderName):\n\t\tself.folderName = folderName\n\t\tself.process()\n\n\tdef process(self):\n\t\tprint(listdir(self.folderName))\n\t\t\n\n\t\tfor file in listdir(self.folderName):\n\t\t\twith open(self.folderName + \"/\" + file) as fl:\n\t\t\t\tjsonData = json.load(fl)\n\t\t\t\tprint(\"length\", len(jsonData))\n\t\t\t\tprint(jsonData[0][\"pupilListSmoothed\"][:5])\n\t\t", "sub_path": "FinalEvaluation/SlicedDataFormatter.py", "file_name": "SlicedDataFormatter.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.listdir", "line_number": 13, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 16, "usage_type": "call"}, {"api_name": "json.load", "line_number": 18, "usage_type": "call"}]} +{"seq_id": "57619942", "text": "# -*- coding: utf-8 -*-\nimport numpy as np\nimport pandas as pd\nimport os\nimport pytest\nfrom unittest.mock import patch\n\nfrom intake_geopandas import geopandas\n\n\n@pytest.fixture\ndef data_filenames():\n basedir = os.path.dirname(__file__)\n return dict(stations=os.path.join(basedir, 'data', 'stations', 'stations.shp'))\n\n\n@pytest.fixture\ndef stations_datasource(data_filenames):\n return geopandas.ShapeSource(data_filenames['stations'])\n\n\ndef test_stations_datasource(stations_datasource):\n info = stations_datasource.discover()\n\n assert info['dtype'] == {'name': 'object',\n 'marker-col': 'object',\n 'marker-sym': 'object',\n 'line': 'object',\n 'geometry': 'object'}\n", "sub_path": "tests/test_source.py", "file_name": "test_source.py", "file_ext": "py", "file_size_in_byte": 790, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.path.dirname", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pytest.fixture", "line_number": 11, "usage_type": "attribute"}, {"api_name": "intake_geopandas.geopandas.ShapeSource", "line_number": 19, "usage_type": "call"}, {"api_name": "intake_geopandas.geopandas", "line_number": 19, "usage_type": "name"}, {"api_name": "pytest.fixture", "line_number": 17, "usage_type": "attribute"}]} +{"seq_id": "334987329", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nImplements the behavior associated with the 'join' (Beta) node in a RETE (RETE/UL)\nnetwork:\n\n- Stores tokens in two memories\n- Tokens in memories are checked for consistent bindings (unification)\n for variables in common *across* both\n- Network 'trigger' is propagated downward\n\nThis reference implementation follows, quite closely, the algorithms presented\nin Robert Doorenbos’ PhD thesis (1995) `“Production Matching for Large Learning Systems” `_\n\nA N3 Triple is a working memory element (WME)\n\nThe Memories are implemented with consistent binding hashes. 
Unlinking is not\nimplemented but null activations are mitigated (somewhat) by the hash / set\nmechanism.\n\n\n\n\"\"\"\nimport copy\nfrom functools import reduce\nfrom pprint import pformat\n\nfrom rdflib import BNode, Variable\nfrom rdflib.util import first\n\nfrom FuXi.util import project\n\nfrom .AlphaNode import AlphaNode, BuiltInAlphaNode, ReteToken\nfrom .Node import Node\n\n__all__ = [\n \"BetaNode\",\n \"PartialInstantiation\",\n \"ReteMemory\",\n]\n\n\nLEFT_MEMORY = 1\nRIGHT_MEMORY = 2\n\n# Implement left unlinking?\nLEFT_UNLINKING = False\n\nmemoryPosition = {\n LEFT_MEMORY: \"left\",\n RIGHT_MEMORY: \"right\",\n}\n\n\ndef _collectVariables(node):\n \"\"\"\n Utility function for locating variables common to the patterns in both left and right nodes\n \"\"\"\n if isinstance(node, BuiltInAlphaNode):\n return set()\n if isinstance(node, AlphaNode):\n return set(\n [term for term in node.triplePattern if isinstance(term, (Variable, BNode))]\n )\n elif node:\n combinedVars = set()\n combinedVars |= node.leftVariables\n combinedVars |= node.rightVariables\n return combinedVars\n else:\n return set()\n\n\ndef _any(seq, pred=None):\n \"\"\"Returns True if pred(x) is true for at least one element in the iterable\"\"\"\n for elem in filter(pred, seq):\n return True\n return False\n\n\nclass ReteMemory(set):\n def __init__(self, betaNode, position, filter=None):\n super(ReteMemory, self).__init__(())\n self.filter = filter\n self.successor = betaNode\n self.position = position\n self.substitutionDict = {} # hashed\n\n def union(self, other):\n \"\"\"Return the union of two sets as a new set.\n\n (I.e. all elements that are in either set.)\n \"\"\"\n result = ReteMemory(self.successor, self.position)\n result.update(other)\n return result\n\n def __repr__(self):\n return \"<%sMemory: %s item(s)>\" % (\n self.position == LEFT_MEMORY and \"Beta\" or \"Alpha\",\n len(self),\n )\n\n def addToken(self, token, debug=False):\n commonVarKey = []\n if isinstance(token, PartialInstantiation):\n for binding in token.bindings:\n commonVarKey = []\n for var in self.successor.commonVariables:\n commonVarKey.append(binding.get(var))\n self.substitutionDict.setdefault(tuple(commonVarKey), set()).add(token)\n else:\n for var in self.successor.commonVariables:\n commonVarKey.append(token.bindingDict.get(var))\n self.substitutionDict.setdefault(tuple(commonVarKey), set()).add(token)\n self.add(token)\n\n def reset(self):\n self.clear()\n self.substitutionDict = {}\n\n @classmethod\n def _wrap_methods(cls, names):\n def wrap_method_closure(name):\n def inner(self, *args):\n result = getattr(super(cls, self), name)(*args)\n if isinstance(result, set) and not hasattr(result, \"foo\"):\n result = cls(result, foo=self.foo)\n return result\n\n inner.fn_name = name\n setattr(cls, name, inner)\n\n for name in names:\n wrap_method_closure(name)\n\n\nReteMemory._wrap_methods(\n [\n \"__ror__\",\n \"difference_update\",\n \"__isub__\",\n \"symmetric_difference\",\n \"__rsub__\",\n \"__and__\",\n \"__rand__\",\n \"intersection\",\n \"difference\",\n \"__iand__\",\n \"__ixor__\",\n \"symmetric_difference_update\",\n \"__or__\",\n \"copy\",\n \"__rxor__\",\n \"intersection_update\",\n \"__xor__\",\n \"__ior__\",\n \"__sub__\",\n ]\n)\n\n\nclass PartialInstantiation(object):\n \"\"\"\n Represents a set of WMEs 'joined' along one or more common variables from\n an ancestral join node 'up' the network\n\n In the RETE/UL PhD thesis, this is referred to as a token, which contains a\n set of WME triples. 
This is a bit of a clash with the use of the same\n word (in the original Forgy paper) to describe what is essentially a WME\n and whether or not it is an addition to the network's memories or a\n removal\n\n It is implemented (in the RETE/UL thesis) as a linked list of::\n\n structure token:\n parent: token {points to the higher token, for items 1...i-1}\n wme: WME {gives item i}\n end\n\n Here it is instead implemented as a set of WME triples associated with a\n list of variables whose bindings are consistent\n\n >>> aNode = AlphaNode((Variable(\"X\"), RDF.type, Variable(\"C\")))\n >>> token = ReteToken((URIRef(\"urn:uuid:Boo\"), RDF.type, URIRef(\"urn:uuid:Foo\")))\n >>> token = token.bindVariables(aNode)\n >>> PartialInstantiation([token]) # doctest: +SKIP\n urn:uuid:Boo, C->urn:uuid:Foo>}>\n >>> for token in PartialInstantiation([token]): # doctest: +SKIP\n ... print(token)\n urn:uuid:Boo, C->urn:uuid:Foo>\n \"\"\"\n\n def __init__(self, tokens=None, debug=False, consistentBindings=None):\n \"\"\"\n Note a hash is calculated by\n sorting & concatenating the hashes of its tokens\n \"\"\"\n self.joinedBindings = consistentBindings and consistentBindings or {}\n self.inconsistentVars = set()\n self.debug = debug\n self.tokens = set()\n self.bindings = []\n if tokens:\n for token in tokens:\n self.add(token, noPostProcessing=True)\n self._generateHash()\n self._generateBindings()\n\n def copy(self):\n tokenList = []\n for token in self.tokens:\n wme = copy.deepcopy(token)\n tokenList.append(wme)\n return PartialInstantiation(tokenList, consistentBindings=self.joinedBindings)\n\n def _generateHash(self):\n tokenHashes = [hash(token) for token in self.tokens]\n tokenHashes.sort()\n self.hash = hash(reduce(lambda x, y: x + y, tokenHashes))\n\n def unify(self, left, right):\n \"\"\"\n Takes two dictionaries and collapses them into one if there are no overlapping\n 'bindings', or 'rounds out' both dictionaries so that each has the other's\n non-overlapping bindings\n \"\"\"\n bothKeys = [\n key\n for key in list(left.keys()) + list(right.keys())\n if key not in self.joinedBindings\n ]\n if len(bothKeys) == len(set(bothKeys)):\n joinDict = left.copy()\n joinDict.update(right)\n return joinDict\n else:\n rCopy = right.copy()\n left.update(\n project(rCopy, [key for key in list(right.keys()) if key not in left])\n )\n lCopy = left.copy()\n right.update(\n project(lCopy, [key for key in list(left.keys()) if key not in right])\n )\n return [left, right]\n\n def _generateBindings(self):\n \"\"\"\n Generates a list of dictionaries - each a unique variable substitution (binding)\n which applies to the ReteTokens in this PartialInstantiation\n\n Unjoined variables with different names aren't bound to the same value\n (B and Y aren't both bound to \"Bart Simpson\" simultaneously)\n\n Different variables which bind to the same value *within* a token include this combination\n in the resulting bindings\n\n \"\"\"\n\n def product(*args):\n if not args:\n return iter(((),)) # yield tuple()\n return (\n items + (item,) for items in product(*args[:-1]) for item in args[-1]\n )\n\n disjunctiveDict = {}\n for token in self.tokens:\n for key, val in list(token.bindingDict.items()):\n disjunctiveDict.setdefault(key, set()).add(val)\n keys = list(disjunctiveDict)\n bindings = [\n dict([(keys[idx], val) for idx, val in enumerate(entry)])\n for entry in product(\n *tuple([disjunctiveDict[var] for var in disjunctiveDict])\n )\n ]\n self.bindings = bindings\n\n def __hash__(self):\n return self.hash\n\n def __eq__(self, other):\n 
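# equality is decided by self.hash, which _generateHash builds from the sorted hashes of the member tokens\n 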
return hash(self) == hash(other)\n\n def add(self, token, noPostProcessing=False):\n \"\"\"\n >>> aNode = AlphaNode((Variable(\"S\"), Variable(\"P\"), Variable(\"O\")))\n >>> token1 = ReteToken((URIRef(\"urn:uuid:Boo\"), RDF.type, URIRef(\"urn:uuid:Foo\")))\n >>> token2 = ReteToken((URIRef(\"urn:uuid:Foo\"), RDF.type, URIRef(\"urn:uuid:Boo\")))\n >>> inst = PartialInstantiation([token1.bindVariables(aNode),token2.bindVariables(aNode)])\n >>> inst # doctest: +SKIP\n urn:uuid:Boo, P->http://www.w3.org/1999/02/22-rdf-syntax-ns#type, O->urn:uuid:Foo>, urn:uuid:Foo, P->http://www.w3.org/1999/02/22-rdf-syntax-ns#type, O->urn:uuid:Boo>}>\n \"\"\"\n self.tokens.add(token)\n if not noPostProcessing:\n self._generateHash()\n self._generateBindings()\n\n def __repr__(self):\n if self.joinedBindings:\n joinMsg = \" (joined on %s)\" % (\n \" ,\".join([\"?\" + v for v in self.joinedBindings])\n )\n else:\n joinMsg = \"\"\n return \"\" % (joinMsg, self.tokens)\n\n def __iter__(self):\n for i in self.tokens:\n yield i\n\n def __len__(self):\n return len(self.tokens)\n\n def addConsistentBinding(self, newJoinVariables):\n # newJoinDict = self.joinedBindings.copy()\n # only a subset of the tokens in this partial instantiation will be 'merged' with\n # the new token - joined on the new join variables\n newJoinDict = dict([(v, None) for v in newJoinVariables])\n unmappedJoinVars = set(newJoinDict)\n # newJoinDict.update(dict([(v,None) for v in newJoinVariables]))\n for binding in self.bindings:\n for key, val in newJoinDict.items():\n boundVal = binding.get(key)\n if boundVal is not None:\n unmappedJoinVars.discard(key)\n if val is None:\n newJoinDict[key] = boundVal\n if unmappedJoinVars:\n for unmappedVar in unmappedJoinVars:\n for token in self.tokens:\n unmappedVarVal = token.getVarBindings().get(unmappedVar)\n if unmappedVarVal is not None:\n assert (\n newJoinDict[unmappedVar] is None\n or unmappedVarVal == newJoinDict[unmappedVar]\n )\n newJoinDict[unmappedVar] = unmappedVarVal\n self.joinedBindings.update(newJoinDict)\n self._generateBindings()\n\n def newJoin(self, rightWME, newJoinVariables):\n \"\"\"\n >>> aNode1 = AlphaNode((Variable(\"P1\"), RDF.type, URIRef(\"urn:uuid:Prop1\")))\n >>> aNode2 = AlphaNode((Variable(\"P2\"), RDF.type, URIRef(\"urn:uuid:Prop1\")))\n >>> aNode3 = AlphaNode((Variable(\"P1\"), Variable(\"P2\"), RDFS.Class))\n >>> token1 = ReteToken((RDFS.domain, RDFS.domain, RDFS.Class))\n >>> token2 = ReteToken((RDFS.domain, RDF.type, URIRef(\"urn:uuid:Prop1\")))\n >>> token3 = ReteToken((RDFS.range, RDF.type, URIRef(\"urn:uuid:Prop1\")))\n >>> token4 = ReteToken((RDFS.range, RDFS.domain, RDFS.Class))\n >>> token5 = ReteToken((RDFS.domain, RDF.type, URIRef(\"urn:uuid:Prop1\"))).bindVariables(aNode2)\n >>> inst = PartialInstantiation([token2.bindVariables(aNode1), token3.bindVariables(aNode2), token5])\n >>> pprint(list(inst.tokens)) # doctest: +SKIP\n [http://www.w3.org/2000/01/rdf-schema#range>,\n http://www.w3.org/2000/01/rdf-schema#domain>,\n http://www.w3.org/2000/01/rdf-schema#domain>]\n >>> newInst = inst.newJoin(token1.bindVariables(aNode3),[Variable(\"P2\")])\n >>> token1 # doctest: +SKIP\n http://www.w3.org/2000/01/rdf-schema#domain, P2->http://www.w3.org/2000/01/rdf-schema#domain>\n >>> newInst # doctest: +SKIP\n http://www.w3.org/2000/01/rdf-schema#domain, P2->http://www.w3.org/2000/01/rdf-schema#domain>, http://www.w3.org/2000/01/rdf-schema#range>, http://www.w3.org/2000/01/rdf-schema#domain>, http://www.w3.org/2000/01/rdf-schema#domain>}>\n >>> 
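# show the WMEs gathered in the joined instantiation\n >>> 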
pprint(list(newInst.tokens)) # doctest: +SKIP\n [http://www.w3.org/2000/01/rdf-schema#domain, P2->http://www.w3.org/2000/01/rdf-schema#domain>,\n http://www.w3.org/2000/01/rdf-schema#range>,\n http://www.w3.org/2000/01/rdf-schema#domain>,\n http://www.w3.org/2000/01/rdf-schema#domain>]\n \"\"\"\n newJoinDict = self.joinedBindings.copy()\n if newJoinVariables:\n # only a subset of the tokens in this partial instantiation will be 'merged' with\n # the new token - joined on the new join variables\n newJoinDict.update(project(rightWME.bindingDict, newJoinVariables))\n newPInst = PartialInstantiation([], consistentBindings=newJoinDict)\n for token in self.tokens:\n commonVars = False\n for newVar in [\n x\n for x in newJoinVariables\n if x in token.bindingDict\n and rightWME.bindingDict[x] == token.bindingDict[x]\n ]:\n # consistent token\n commonVars = True\n newPInst.add(token, noPostProcessing=True)\n if not commonVars:\n # there are no common variables, no need to check\n newPInst.add(token, noPostProcessing=True)\n else:\n # all of the tokens in this partial instantiation are already bound consistently with\n # respect to the new token\n newPInst = PartialInstantiation([], consistentBindings=newJoinDict)\n for token in self.tokens:\n newPInst.add(token, noPostProcessing=True)\n newPInst.add(rightWME)\n return newPInst\n\n\nclass BetaNode(Node):\n \"\"\"\n Performs a rete network join between partial instantiations in its\n left memory and tokens in its memories\n\n .. pull-quote::\n\n The data structure for a join node, therefore, must contain pointers\n to its two memory nodes (so they can be searched), a specification of\n any variable binding consistency tests to be performed, and a list of\n the node's children. .. (the beta memory is always its parent).\n\n Setup 3 alpha nodes (Triple Patterns)::\n\n aNode1 = ?X rdf:value 1\n aNode2 = ?X rdf:type ?Y\n aNode3 = ?Z ?W\n\n >>> aNode1 = AlphaNode((Variable(\"X\"), RDF.value, Literal(2)))\n >>> aNode2 = AlphaNode((Variable(\"X\"), RDF.type, Variable(\"Y\")))\n >>> aNode3 = AlphaNode((Variable(\"Z\"), URIRef(\"urn:uuid:Prop1\"), Variable(\"W\")))\n\n\n Rete Network::\n\n aNode1\n |\n joinNode1\n \\\\ aNode2\n \\\\ / aNode3\n joinNode2 /\n \\\\ /\n \\\\ /\n joinNode3\n\n joinNode3 is the Terminal node\n\n >>> joinNode1 = BetaNode(None, aNode1, aPassThru=True)\n >>> joinNode1.connectIncomingNodes(None, aNode1)\n >>> joinNode2 = BetaNode(joinNode1, aNode2)\n >>> joinNode2.connectIncomingNodes(joinNode1, aNode2)\n >>> joinNode3 = BetaNode(joinNode2, aNode3)\n >>> joinNode3.connectIncomingNodes(joinNode2, aNode3)\n\n >>> joinNode1\n \n >>> joinNode2\n \n\n Setup tokens (RDF triples)::\n\n token1 = rdf:value 2\n token2 = rdf:value 2\n token3 = rdf:type (fires network)\n\n token3 is set with a debug 'trace' so its path through the network is\n printed along the way\n\n token4 = rdf:type \n token5 = (fires network)\n token6 = (fires network)\n\n >>> token1 = ReteToken((URIRef(\"urn:uuid:Boo\"), RDF.value, Literal(2)))\n >>> token2 = ReteToken((URIRef(\"urn:uuid:Foo\"), RDF.value, Literal(2)))\n >>> token3 = ReteToken((URIRef(\"urn:uuid:Foo\"), RDF.type, URIRef(\"urn:uuid:Baz\")), debug=True)\n >>> token4 = ReteToken((URIRef(\"urn:uuid:Bash\"), RDF.type, URIRef(\"urn:uuid:Baz\")))\n >>> token5 = ReteToken((URIRef(\"urn:uuid:Bar\"), URIRef(\"urn:uuid:Prop1\"), URIRef(\"urn:uuid:Beezle\")), debug=True)\n >>> token6 = ReteToken((URIRef(\"urn:uuid:Bar\"), URIRef(\"urn:uuid:Prop1\"), URIRef(\"urn:uuid:Bundle\")))\n >>> tokenList = [token1, token2, 
token3, token4, token5, token6]\n\n Setup the consequent (RHS) of the rule::\n\n { ?X rdf:value 1. ?X rdf:type ?Y. ?Z ?W } => { ?X a }\n\n A Network 'stub' is setup to capture the conflict set at the time the rule\n is fired\n\n >>> joinNode3.consequent.update([(Variable(\"X\"), RDF.type, URIRef(\"urn:uuid:SelectedVar\"))])\n >>> class NetworkStub:\n ... def __init__(self):\n ... self.firings = 0\n ... self.conflictSet = set()\n ... def fireConsequent(self, tokens, termNode, debug):\n ... self.firings += 1\n ... self.conflictSet.add(tokens)\n >>> testHelper = NetworkStub()\n >>> joinNode3.network = testHelper\n\n Add the tokens sequentially to the top of the network (the alpha nodes).\n token3 triggers a trace through it's path down to the terminal node\n (joinNode2)\n\n >>> aNode1.descendentMemory[0]\n \n >>> aNode1.descendentMemory[0].position\n 2\n >>> aNode1.activate(token1.unboundCopy())\n >>> aNode1.activate(token2.unboundCopy())\n >>> joinNode1.memories[LEFT_MEMORY]\n \n >>> joinNode2.memories[LEFT_MEMORY]\n \n\n # @@DEVNOTE TODO uncommented in 1.3/4\n >>> aNode1.activate(token3.unboundCopy()) # doctest: +SKIP\n\n Add the remaining 3 tokens (each fires the network)\n\n >>> aNode2.activate(token4.unboundCopy())\n >>> list(joinNode3.memories[LEFT_MEMORY])[0] # doctest: +SKIP\n urn:uuid:Foo>, urn:uuid:Foo,Y->urn:uuid:Baz>])>\n >>> aNode3.activate(token5.unboundCopy()) # doctest: +SKIP\n Propagated from \n ('urn:uuid:Bar', 'urn:uuid:Prop1', 'urn:uuid:Beezle')\n .propagate(right,None,urn:uuid:Bar,W->urn:uuid:Beezle>)\n activating with urn:uuid:Bar,W->urn:uuid:Beezle>, urn:uuid:Foo>, urn:uuid:Foo,Y->urn:uuid:Baz>])>\n\n >>> aNode3.activate(token6.unboundCopy())\n >>> joinNode3 # doctest: +SKIP\n \n >>> testHelper.firings # doctest: +SKIP\n 2\n >>> pprint(testHelper.conflictSet) # doctest: +SKIP\n [urn:uuid:Bar,W->urn:uuid:Beezle>, urn:uuid:Foo>, urn:uuid:Foo,Y->urn:uuid:Baz>])>, urn:uuid:Bar,W->urn:uuid:Bundle>, urn:uuid:Foo>, urn:uuid:Foo,Y->urn:uuid:Baz>])>]\n \"\"\"\n\n def __init__(\n self,\n leftNode,\n rightNode,\n aPassThru=False,\n leftVariables=None,\n rightVariables=None,\n executeActions=None,\n ReteMemoryKind=ReteMemory,\n ):\n self.ReteMemoryKind = ReteMemoryKind\n self.instantiatingTokens = set()\n self.aPassThru = aPassThru\n self.name = BNode()\n self.network = None\n\n # used by terminal nodes only\n self.consequent = set() # List of tuples in RHS\n self.rules = set()\n self.antecedent = None\n self.headAtoms = set()\n self.leftNode = leftNode\n # The incoming right input of a BetaNode is always an AlphaNode\n self.rightNode = rightNode\n self.memories = {}\n self.descendentMemory = []\n self.descendentBetaNodes = set()\n self.leftUnlinkedNodes = set()\n self.unlinkedMemory = None\n self.fedByBuiltin = None\n if isinstance(leftNode, BuiltInAlphaNode):\n self.fedByBuiltin = LEFT_MEMORY\n assert not isinstance(\n rightNode, BuiltInAlphaNode\n ), \"Both %s and %s are builtins feeding a beta node!\" % (\n leftNode,\n rightNode,\n )\n self.memories[RIGHT_MEMORY] = self.ReteMemoryKind(\n (self, RIGHT_MEMORY, leftNode.n3builtin)\n )\n else:\n self.memories[RIGHT_MEMORY] = self.ReteMemoryKind(self, RIGHT_MEMORY)\n\n assert not (\n self.fedByBuiltin\n ), \"No support for 'built-ins', function symbols, or non-equality tests.\"\n if isinstance(rightNode, BuiltInAlphaNode):\n self.fedByBuiltin = RIGHT_MEMORY\n assert not isinstance(\n leftNode, BuiltInAlphaNode\n ), \"Both %s and %s are builtins feeding a beta node!\" % (\n leftNode,\n rightNode,\n )\n self.memories[LEFT_MEMORY] = 
self.ReteMemoryKind(\n self, LEFT_MEMORY, rightNode.n3builtin\n )\n else:\n self.memories[LEFT_MEMORY] = self.ReteMemoryKind(self, LEFT_MEMORY)\n if aPassThru:\n if rightNode:\n self.leftVariables = set() if leftVariables is None else leftVariables\n self.rightVariables = (\n _collectVariables(self.rightNode)\n if rightVariables is None\n else rightVariables\n )\n self.commonVariables = list(self.rightVariables)\n else:\n self.leftVariables = self.rightVariables = set()\n self.commonVariables = []\n else:\n self.leftVariables = (\n _collectVariables(self.leftNode)\n if leftVariables is None\n else leftVariables\n )\n self.rightVariables = (\n _collectVariables(self.rightNode)\n if rightVariables is None\n else rightVariables\n )\n self.commonVariables = [\n leftVar\n for leftVar in self.leftVariables\n if leftVar in self.rightVariables\n ]\n self.leftIndex = {}\n self.rightIndex = {}\n self.executeActions = executeActions if executeActions is not None else {}\n\n def connectIncomingNodes(self, leftNode, rightNode):\n if leftNode:\n if self.leftNode and LEFT_UNLINKING:\n # candidate for left unlinking\n self.leftUnlinkedNodes.add(leftNode)\n leftNode.unlinkedMemory = self.ReteMemoryKind(self, LEFT_MEMORY)\n # print(f\"unlinked {leftNode} from {self}\")\n elif self.leftNode:\n leftNode.updateDescendentMemory(self.memories[LEFT_MEMORY])\n leftNode.descendentBetaNodes.add(self)\n rightNode.updateDescendentMemory(self.memories[RIGHT_MEMORY])\n rightNode.descendentBetaNodes.add(self)\n\n def clauseRepresentation(self):\n if len(self.rules) > 1:\n return \"And(%s) :- %s\" % (\n \" \".join([repr(atom) for atom in self.headAtoms]),\n self.antecedent,\n )\n elif len(self.rules) > 0:\n return repr(first(self.rules).formula)\n else:\n return \"\"\n\n def actionsForTerminalNode(self):\n for rhsTriple in self.consequent:\n override, executeFn = self.executeActions.get(rhsTriple, (None, None))\n if executeFn is not None:\n yield override, executeFn\n\n def __repr__(self):\n if self.executeActions:\n actionStr = f\" with {len(list(self.actionsForTerminalNode()))} actions\"\n else:\n actionStr = \"\"\n if self.consequent and self.fedByBuiltin:\n nodeType = \"TerminalBuiltin(%s)%s\" % (\n self.memories[self._oppositeMemory(self.fedByBuiltin)].filter,\n actionStr,\n )\n elif self.consequent:\n nodeType = \"TerminalNode%s (%s)\" % (actionStr, self.clauseRepresentation())\n elif self.fedByBuiltin:\n nodeType = \"Builtin(%s)\" % (\n self.memories[self._oppositeMemory(self.fedByBuiltin)].filter\n )\n else:\n nodeType = \"BetaNode\"\n\n if self.unlinkedMemory is not None:\n nodeType = \"LeftUnlinked-\" + nodeType\n\n leftLen = self.memories[LEFT_MEMORY] and len(self.memories[LEFT_MEMORY]) or 0\n\n return \"<%s %s: CommonVariables: %s (%s in left, %s in right memories)>\" % (\n nodeType,\n self.aPassThru and \"(pass-thru)\" or \"\",\n [var.n3() for var in self.commonVariables],\n leftLen,\n len(self.memories[RIGHT_MEMORY]),\n )\n\n def _activate(self, partInstOrList, debug=False):\n if debug: # pragma: no cover\n print(f\"activating with {partInstOrList}\")\n if self.unlinkedMemory is not None:\n if debug: # pragma: no cover\n print(f\"adding {partInstOrList} into unlinked memory\")\n self.unlinkedMemory.addToken(partInstOrList, debug)\n for memory in self.descendentMemory:\n if debug: # pragma: no cover\n print(f\"\\t# {memoryPosition[memory.position]} memory #\")\n print(f\"{self}.activate: \\t{memory.successor}\")\n if memory.successor.consequent:\n print(f\"\\t{memory.successor.clauseRepresentation()}\")\n # 
print(f\"{self,partInstOrList}\")\n memory.addToken(partInstOrList, debug)\n if memory.successor.aPassThru or not memory.successor.checkNullActivation(\n memory.position\n ):\n if memory.position == LEFT_MEMORY:\n memory.successor.propagate(memory.position, debug, partInstOrList)\n else:\n # print(f\"{partInstOrList}\")\n memory.successor.propagate(None, debug, partInstOrList)\n\n if self.consequent:\n self.network.fireConsequent(partInstOrList, self, debug)\n\n def _unrollTokens(self, iterable):\n for token in iterable:\n if isinstance(token, PartialInstantiation):\n for i in token:\n yield i\n else:\n yield token\n\n def _oppositeMemory(self, memoryPosition):\n if memoryPosition == LEFT_MEMORY:\n return RIGHT_MEMORY\n else:\n return LEFT_MEMORY\n\n # @@DEVNOTE TODO uncalled code\n def _checkOpposingMemory(self, memoryPosition):\n return bool(len(self.memories[self._oppositeMemory(memoryPosition)]))\n\n def checkNullActivation(self, source):\n \"\"\"\n Checks whether this beta node is involved in a NULL activation relative to the source.\n NULL activations are where one of the opposing memories that feed\n this beta node are empty. Takes into account built-in filters/function.\n source is the position of the 'triggering' memory (i.e., the memory that had a token added)\n \"\"\"\n oppositeMem = self.memories[self._oppositeMemory(source)]\n return not self.fedByBuiltin and not oppositeMem\n\n def propagate(self, propagationSource, debug=False, partialInst=None, wme=None):\n \"\"\"\n .. pull-quote::\n\n activation' of Beta Node - checks for consistent variable bindings\n between memory of incoming nodes\n\n Beta (join nodes) with no variables in common with both ancestors\n activate automatically upon getting a propagation 'signal'\n\n \"\"\"\n if debug and propagationSource:\n print(\n f\"{self}.propagate({memoryPosition[propagationSource]}, {partialInst}, {wme})\"\n )\n print(\"### Left Memory ###\")\n print(f\"{pformat(list(self.memories[LEFT_MEMORY]))}\")\n print(\"###################\")\n print(\"### Right Memory ###\")\n print(f\"{pformat(list(self.memories[RIGHT_MEMORY]))}\")\n print(\"####################\")\n print(f\"{self.clauseRepresentation()}\")\n if self.aPassThru:\n if self.consequent:\n if self.rightNode is None:\n assert partialInst is not None\n self._activate(partialInst, debug)\n else:\n assert not partialInst, \"%s,%s\" % (partialInst, wme)\n self._activate(\n PartialInstantiation(\n [wme], consistentBindings=wme.bindingDict.copy()\n ),\n debug,\n )\n\n elif self.memories[RIGHT_MEMORY]:\n # pass on wme as an unjoined partInst\n # print(f\"{self}\")\n if wme:\n self._activate(\n PartialInstantiation(\n [wme], consistentBindings=wme.bindingDict.copy()\n ),\n debug,\n )\n elif partialInst:\n # print(f\"## Problem ### {self}.propagate({memoryPosition[propagationSource]}, {partialInst}, {wme})\")\n self._activate(partialInst, debug)\n elif not propagationSource:\n # Beta node right activated by another beta node\n # Need to unify on common variable hash, using the bindings\n # provided by the partial instantiation that triggered the\n # activation\n if partialInst:\n for binding in partialInst.bindings:\n # for var in self.commonVariables:\n # if var not in binding:\n # import pdb;pdb.set_trace()\n try:\n commonVals = tuple(\n [binding[var] for var in self.commonVariables]\n )\n lTokens = self.memories[RIGHT_MEMORY].substitutionDict.get(\n commonVals, set()\n )\n rTokens = self.memories[LEFT_MEMORY].substitutionDict.get(\n commonVals, set()\n )\n joinedTokens = 
set(self._unrollTokens(rTokens | lTokens))\n if joinedTokens:\n commonDict = dict(\n [\n (\n var,\n list(commonVals)[\n self.commonVariables.index(var)\n ],\n )\n for var in self.commonVariables\n ]\n )\n newP = PartialInstantiation(\n joinedTokens, consistentBindings=commonDict\n )\n self._activate(newP, debug)\n except KeyError:\n print(f\"\\tProblem with {partialInst}\")\n\n elif propagationSource == LEFT_MEMORY:\n # Doesn't check for null left activation! - cost is mitigated by\n # left activation, partialInst passed down\n # procedure join-node-left-activation (node: join-node, t: token)\n # for each w in node.amem.items do\n # if perform-join-tests (node.tests, t, w) then\n # for each child in node.children do\n # left-activation (child, t, w)\n # end\n matches = set()\n if self.fedByBuiltin:\n builtin = self.memories[self._oppositeMemory(self.fedByBuiltin)].filter\n newConsistentBindings = [\n term\n for term in [builtin.argument, builtin.result]\n if isinstance(term, Variable)\n and term not in partialInst.joinedBindings\n ]\n partialInst.addConsistentBinding(newConsistentBindings)\n for binding in partialInst.bindings:\n lhs = builtin.argument\n rhs = builtin.result\n lhs = binding.get(lhs) if isinstance(lhs, Variable) else lhs\n rhs = binding.get(rhs) if isinstance(rhs, Variable) else rhs\n assert lhs is not None and rhs is not None\n if builtin.func(lhs, rhs):\n if debug: # pragma: no cover\n print(f\"\\t{binding} + {builtin} => True\")\n matches.add(partialInst)\n else:\n if debug: # pragma: no cover\n print(f\"\\t{binding} + {builtin} => False\")\n else:\n for binding in partialInst.bindings:\n # iterate over the binding combinations\n # and use the substitutionDict in the right memory to find\n # matching WME'a\n if debug: # pragma: no cover\n print(f\"\\t {binding}\")\n\n substitutedTerm = []\n commonDictKV = []\n for var in self.commonVariables:\n if var not in binding:\n continue\n else:\n commonDictKV.append((var, binding[var]))\n substitutedTerm.append(binding[var])\n rWMEs = self.memories[RIGHT_MEMORY].substitutionDict.get(\n tuple(substitutedTerm), set()\n )\n commonDict = dict(commonDictKV)\n if debug: # pragma: no cover\n print(\n f\"{commonDict} {rWMEs} {list(self.memories[RIGHT_MEMORY].substitutionDict.keys())}\"\n )\n for rightWME in rWMEs:\n if isinstance(rightWME, ReteToken):\n matches.add(\n partialInst.newJoin(\n rightWME,\n [\n var\n for var in self.commonVariables\n if var not in partialInst.joinedBindings\n ],\n )\n )\n else:\n # Joining two Beta/Join nodes!\n joinedTokens = list(partialInst.tokens | rightWME.tokens)\n # print(\"### joining two tokens ###\\n {pformat(joinedTokens)}\")\n if self.consequent:\n for consequent in self.consequent:\n consVars = [\n x for x in consequent if isinstance(x, Variable)\n ]\n # [i for i in consequent if isinstance(i,Variable)]\n failed = True\n for binding in PartialInstantiation(\n joinedTokens, consistentBindings=commonDict\n ).bindings:\n if _any(consVars, lambda x: x not in binding):\n # [key for key in consVars if key not in binding]:\n continue\n else:\n failed = False\n if not failed:\n newP = PartialInstantiation(\n joinedTokens, consistentBindings=commonDict\n )\n matches.add(newP)\n else:\n newP = PartialInstantiation(\n joinedTokens, consistentBindings=commonDict\n )\n matches.add(newP)\n\n for pInst in matches:\n self._activate(pInst, debug)\n else:\n # right activation, partialInst & wme passed down\n # procedure join-node-right-activation (node: join-node, w: WME)\n # for each t in node.parent.items do 
{\"parent\" is the beta memory node}\n # if perform-join-tests (node.tests, t, w) then\n # for each child in node.children do left-activation (child, t, w)\n # end\n # print(f\"{pformat(self.memories[self._oppositeMemory(propagationSource)])}\")\n matches = set()\n try:\n lPartInsts = self.memories[LEFT_MEMORY].substitutionDict.get(\n tuple([wme.bindingDict[var] for var in self.commonVariables])\n )\n except Exception:\n raise Exception(\"%s and %s\" % (repr(self), repr(wme.bindingDict)))\n if lPartInsts:\n for partialInst in lPartInsts:\n if not isinstance(partialInst, PartialInstantiation):\n singleToken = PartialInstantiation(\n [partialInst],\n consistentBindings=partialInst.bindingDict.copy(),\n )\n matches.add(singleToken)\n else:\n assert isinstance(partialInst, PartialInstantiation), repr(\n partialInst\n )\n matches.add(\n partialInst.newJoin(\n wme,\n [\n var\n for var in self.commonVariables\n if var not in partialInst.joinedBindings\n ],\n )\n )\n for pInst in matches:\n self._activate(pInst, debug)\n", "sub_path": "FuXi/Rete/BetaNode.py", "file_name": "BetaNode.py", "file_ext": "py", "file_size_in_byte": 40036, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "AlphaNode.BuiltInAlphaNode", "line_number": 58, "usage_type": "argument"}, {"api_name": "AlphaNode.AlphaNode", "line_number": 60, "usage_type": "argument"}, {"api_name": "rdflib.Variable", "line_number": 62, "usage_type": "name"}, {"api_name": "rdflib.BNode", "line_number": 62, "usage_type": "name"}, {"api_name": "copy.deepcopy", "line_number": 212, "usage_type": "call"}, {"api_name": "functools.reduce", "line_number": 219, "usage_type": "call"}, {"api_name": "FuXi.util.project", "line_number": 238, "usage_type": "call"}, {"api_name": "FuXi.util.project", "line_number": 242, "usage_type": "call"}, {"api_name": "FuXi.util.project", "line_number": 372, "usage_type": "call"}, {"api_name": "Node.Node", "line_number": 398, "usage_type": "name"}, {"api_name": "rdflib.BNode", "line_number": 537, "usage_type": "call"}, {"api_name": "AlphaNode.BuiltInAlphaNode", "line_number": 554, "usage_type": "argument"}, {"api_name": "AlphaNode.BuiltInAlphaNode", "line_number": 557, "usage_type": "argument"}, {"api_name": "AlphaNode.BuiltInAlphaNode", "line_number": 571, "usage_type": "argument"}, {"api_name": "AlphaNode.BuiltInAlphaNode", "line_number": 574, "usage_type": "argument"}, {"api_name": "rdflib.util.first", "line_number": 636, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 749, "usage_type": "call"}, {"api_name": "pprint.pformat", "line_number": 752, "usage_type": "call"}, {"api_name": "rdflib.Variable", "line_number": 837, "usage_type": "argument"}, {"api_name": "rdflib.Variable", "line_number": 844, "usage_type": "argument"}, {"api_name": "rdflib.Variable", "line_number": 845, "usage_type": "argument"}, {"api_name": "AlphaNode.ReteToken", "line_number": 879, "usage_type": "argument"}, {"api_name": "rdflib.Variable", "line_number": 897, "usage_type": "argument"}]} +{"seq_id": "194006569", "text": "from pprint import pprint as pp\nimport requests\nimport pandas as pd\nimport json\nimport math\nimport datetime as dt\nimport sqlite3\nimport time \nimport os\nimport dash_html_components as html\nfrom datashop import *\nimport datetime as dt\n\nimport os\nfrom pathlib import Path\n\nworking_dir = str(Path(os.path.dirname(os.path.realpath(__file__))))\n\n#from common import working_dir\n#working_dir = str(working_dir) + '/'\n\nwith open 
(working_dir + '/data/cache.json','r') as cache_file:\n cache_dict = json.load(cache_file)\n\nwith open (working_dir + '/data/api_keys.json','r') as cache_file:\n api_keys = json.load(cache_file)\n\n#path = 'c:/prompt_root/CrudeOilApp'\npath = '/home/khan/CrudeOilApp'\nrelpath = '/data/nyt_jsons/'\n\n \n\n################# FUNCTIONS\n\ndef extractRow(row):\n keywords = row['keywords'] # renamed from 'list', which shadowed the builtin\n all_keywords = ''\n for num,item in enumerate(keywords):\n row[str(num).zfill(2) + '_type'] = item['name']\n row[str(num).zfill(2) + '_keyword'] = item['value']\n all_keywords = all_keywords + '; ' + str(item['value'])\n\n row['all_keywords'] = all_keywords\n return row\n\ndef makelink(url):\n link = html.A(html.P('Full Article'),href=url)\n return link\n################### Classes\n\n\n###____________ Energy Information Agency \n\nclass EIA_Series:\n \n eia_api_url= 'http://api.eia.gov/series/?api_key={}&series_id='.format(api_keys['eia'])\n \n \n def __init__(\n self,\n id,\n name = None,\n start = '20010101',\n end = None,\n desc = None,\n date_format='%Y%m%d',\n scale = False\n ):\n \n if name == None:\n self.name = id\n else:\n self.name = name\n \n self.desc = desc\n self.scale = scale\n self.date_format = date_format\n self.end = end\n self.query = self.eia_api_url + id+'&start=' + start \n\n if self.end != None:\n self.query = self.query + '&end=' + self.end \n \n self.request = requests.get(self.query)\n self.series_dict = json.loads(self.request.text)\n self.make_df()\n \n\n def __getitem__(self,sliced):\n return self.frame[sliced]\n \n def show_response(self):\n \n pp(self.series_dict) # pprint was imported as pp, so it is called directly\n \n \n def make_df(self,data_col='data',date_col='Date'):\n \n #_______ Slice out JSON to get series\n self.data_col = data_col\n self.date_col = date_col\n \n self.series_list=self.series_dict['series'][0][data_col]\n \n #________ Put it into frame\n self.frame = pd.DataFrame(self.series_list)\n self.frame.columns=[date_col, self.name] \n \n #______ Convert to datetime and set index\n \n \n self.frame[self.date_col]=pd.to_datetime(\n self.frame[self.date_col],\n format = self.date_format\n )\n \n self.frame.set_index(\n self.date_col,drop=True,inplace=True) \n self.frame.sort_index(ascending=True,inplace=True) \n self.frame = self.frame.asfreq(freq='D').fillna(method='ffill')\n\n self.frame['date_only'] = self.frame.index.astype('str').str.slice(stop=10)\n \n\n #______ capture data as series for convenience attribute\n\n self.series = self.frame.iloc[:,0] \n \n if self.scale == True:\n self.scaler()\n \n def scaler(self):\n\n self.frame['scaled_'+self.name] = min_max_col(self.series)\n self.scaled = self.frame['scaled_'+self.name]\n\n\n\n def chart(self):\n import matplotlib.pyplot as plt # plt is not imported at module level\n self.fig,self.ax = plt.subplots(figsize=(10,6))\n self.ax.plot(self.frame)\n def report(self):\n print(\n \"Earliest Point: {} \\n\".format(self.frame.iloc[0].name), # was self.data, which is never set\n \"Latest Point: {} \\n\".format(self.frame.iloc[-1].name),\n \"\"\n )\n\nclass Depot:\n\n def __init__(self):\n\n self.features = {} \n\n def ingest(self,feature):\n\n self.feature = feature\n\n if len(self.features) == 0:\n self.originals = self.feature.frame[self.feature.name]\n self.scaled = self.feature.frame['scaled_'+self.feature.name]\n \n\n else:\n self.originals = pd.merge_asof(\n self.originals,\n self.feature.frame[self.feature.name],\n right_index=True,\n left_index=True\n )\n\n self.scaled = pd.merge_asof(\n self.scaled,\n self.feature.frame['scaled_'+ self.feature.name],\n right_index=True,\n left_index=True\n )\n\n\n self.features[self.feature.name] = 
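A hypothetical usage sketch of the EIA_Series and Depot classes above (kept commented out because it performs live requests; it assumes a valid EIA API key in api_keys['eia'] and that 'PET.RWTC.D' is a valid daily series id):

# wti = EIA_Series('PET.RWTC.D', name='WTI', start='20190101', scale=True)
# depot = Depot()
# depot.ingest(wti)     # the first ingested feature seeds depot.originals / depot.scaled
# depot.scaled.tail()   # daily, forward-filled, min-max scaled values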
self.feature\n\n\n\n\n###_____________ New York Times API\n\nclass nytResp():\n def __init__(self,start_date,end_date,query_term):\n self.start_date = start_date\n self.end_date = end_date\n self.query_term = query_term\n self.key = api_keys['nyt']\n self.target_url = 'https://api.nytimes.com/svc/search/v2/articlesearch.json?api-key={}'.format(self.key)\n self.cache_dict = {}\n self.cache_dict['news_update'] = str(dt.datetime.now())\n self.get_meta()\n\n\n def get_meta(self):\n\n self.params ={\n 'begin_date':self.start_date,\n 'end_date':self.end_date,\n 'q':self.query_term,\n }\n\n self.first_response_raw = requests.get(self.target_url,self.params)\n self.first_response = json.loads(self.first_response_raw.text)\n\n self.response_meta = self.first_response['response']['meta']\n\n self.total_pages = math.ceil(self.response_meta['hits']/10)\n self.hits = self.response_meta['hits']\n\n \n self.cache_dict['hits'] = self.hits\n self.cache_dict['total_pages'] = self.total_pages\n\n if self.hits > 0:\n self.get_docs()\n\n def get_docs(self):\n\n self.doc_collection = []\n\n for page in range(0,self.total_pages):\n\n self.params['page']= page\n\n self.last_response_raw = requests.get(self.target_url,self.params)\n self.last_response = json.loads(self.last_response_raw.text)\n\n self.last_docs = self.last_response['response']['docs']\n\n for doc in self.last_docs:\n self.doc_collection.append(doc)\n\n if self.total_pages > 1:\n time.sleep(7)\n\n self.build_dataframe()\n\n def build_dataframe(self):\n\n self.frame_list = []\n\n for doc in self.doc_collection:\n self.doc_dict = {\n 'id':doc['_id'],\n 'Date':doc['pub_date'][:19].replace(\"T\", \" \"),\n 'date_only': doc['pub_date'][:10],\n 'abstract':doc['abstract'],\n 'doc_type':doc['document_type'],\n 'main_headline':doc['headline']['main'],\n 'keywords':doc['keywords'],\n 'newsdesk':doc['news_desk'],\n 'url':doc['web_url'],\n 'retrieved':str(dt.datetime.now()) \n \n }\n\n self.frame_list.append(self.doc_dict)\n \n self.frame = pd.DataFrame(self.frame_list)\n #self.frame = self.frame.apply(extractRow,axis=1)\n self.frame['keywords'] = self.frame['keywords'].astype(str)\n\n \n\n\ndef jsons_to_frame(abs_path,rel_path,conn):\n art_list = []\n\n for file in os.listdir(working_dir + rel_path):\n with open(working_dir + rel_path + file) as json_file:\n chunk = json.load(json_file)\n for doc in chunk:\n doc_dict = {\n 'id':doc['_id'],\n 'Date':doc['pub_date'][:19].replace(\"T\", \" \"),\n 'date_only': doc['pub_date'][:10],\n 'abstract':doc['abstract'],\n 'doc_type':doc['document_type'],\n 'main_headline':doc['headline']['main'],\n 'keywords':doc['keywords'],\n 'newsdesk':doc['news_desk'],\n 'url':doc['web_url'],\n 'retrieved':str(dt.datetime.now()) \n \n }\n art_list.append(doc_dict)\n\n frame = pd.DataFrame(art_list)\n #frame = frame.apply(extractRow,axis=1)\n frame['keywords'] = frame['keywords'].astype(str)\n\n frame.to_sql('news',conn, if_exists='replace')\n\n return frame\n\n \n", "sub_path": "data_functions.py", "file_name": "data_functions.py", "file_ext": "py", "file_size_in_byte": 8438, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pathlib.Path", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 17, "usage_type": "call"}, {"api_name": "json.load", "line_number": 23, "usage_type": 
"call"}, {"api_name": "json.load", "line_number": 26, "usage_type": "call"}, {"api_name": "dash_html_components.A", "line_number": 48, "usage_type": "call"}, {"api_name": "dash_html_components.P", "line_number": 48, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 85, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 86, "usage_type": "call"}, {"api_name": "pprint.pprint.pprint", "line_number": 95, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 95, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 107, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 113, "usage_type": "call"}, {"api_name": "pandas.merge_asof", "line_number": 166, "usage_type": "call"}, {"api_name": "pandas.merge_asof", "line_number": 173, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 196, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 196, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 208, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 209, "usage_type": "call"}, {"api_name": "math.ceil", "line_number": 213, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 231, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 232, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 240, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 259, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 259, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 265, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 275, "usage_type": "call"}, {"api_name": "json.load", "line_number": 277, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 289, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 289, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 294, "usage_type": "call"}]} +{"seq_id": "32780611", "text": "# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the default message definition.\"\"\"\n\nfrom enum import Enum\nfrom typing import List, cast\n\nfrom aea.configurations.base import PublicId\nfrom aea.helpers.search.models import Description, Query\nfrom aea.protocols.base import Message\n\n\nclass OEFMessage(Message):\n \"\"\"The OEF message class.\"\"\"\n\n protocol_id = PublicId(\"fetchai\", \"oef\", \"0.1.0\")\n\n class Type(Enum):\n \"\"\"OEF Message types.\"\"\"\n\n REGISTER_SERVICE = \"register_service\"\n REGISTER_AGENT = \"register_agent\"\n UNREGISTER_SERVICE = \"unregister_service\"\n UNREGISTER_AGENT = \"unregister_agent\"\n SEARCH_SERVICES = \"search_services\"\n SEARCH_AGENTS = 
\"search_agents\"\n OEF_ERROR = \"oef_error\"\n DIALOGUE_ERROR = \"dialogue_error\"\n SEARCH_RESULT = \"search_result\"\n\n def __str__(self):\n \"\"\"Get string representation.\"\"\"\n return self.value\n\n class OEFErrorOperation(Enum):\n \"\"\"Operation code for the OEF. It is returned in the OEF Error messages.\"\"\"\n\n REGISTER_SERVICE = 0\n UNREGISTER_SERVICE = 1\n SEARCH_SERVICES = 2\n SEARCH_SERVICES_WIDE = 3\n SEARCH_AGENTS = 4\n SEND_MESSAGE = 5\n REGISTER_AGENT = 6\n UNREGISTER_AGENT = 7\n\n OTHER = 10000\n\n def __str__(self):\n \"\"\"Get string representation.\"\"\"\n return str(self.value)\n\n def __init__(self, type: Type, id: int, **kwargs):\n \"\"\"\n Initialize.\n\n :param type: the type of OEF message.\n :param id: the message id.\n \"\"\"\n super().__init__(type=type, id=id, **kwargs)\n assert self._is_consistent(), \"OEFMessage initialization inconsistent.\"\n\n @property\n def type(self) -> Type: # noqa: F821\n \"\"\"Get the type of the oef_message.\"\"\"\n assert self.is_set(\"type\"), \"type is not set.\"\n return OEFMessage.Type(self.get(\"type\"))\n\n @property\n def id(self) -> int:\n \"\"\"Get the id of the oef_message.\"\"\"\n assert self.is_set(\"id\"), \"id is not set.\"\n return cast(int, self.get(\"id\"))\n\n @property\n def service_description(self) -> Description:\n \"\"\"Get the service_description from the message.\"\"\"\n assert self.is_set(\"service_description\"), \"service_description is not set\"\n return cast(Description, self.get(\"service_description\"))\n\n @property\n def service_id(self) -> str:\n \"\"\"Get the service_id from the message.\"\"\"\n assert self.is_set(\"service_id\"), \"service_id is not set.\"\n return cast(str, self.get(\"service_id\"))\n\n @property\n def agent_description(self) -> Description:\n \"\"\"Get the agent_description from the message.\"\"\"\n assert self.is_set(\"agent_description\"), \"agent_description is not set.\"\n return cast(Description, self.get(\"agent_description\"))\n\n @property\n def agent_id(self) -> str:\n \"\"\"Get the agent_id from the message.\"\"\"\n assert self.is_set(\"agent_id\"), \"agent_id is not set.\"\n return cast(str, self.get(\"agent_id\"))\n\n @property\n def query(self) -> Query:\n \"\"\"Get the query from the message.\"\"\"\n assert self.is_set(\"query\"), \"query is not set.\"\n return cast(Query, self.get(\"query\"))\n\n @property\n def agents(self) -> List[str]:\n \"\"\"Get the agents from the message.\"\"\"\n assert self.is_set(\"agents\"), \"list of agents is not set.\"\n return cast(List[str], self.get(\"agents\"))\n\n @property\n def operation(self) -> OEFErrorOperation: # noqa: F821\n \"\"\"Get the error_operation code from the message.\"\"\"\n assert self.is_set(\"operation\"), \"operation is not set.\"\n return OEFMessage.OEFErrorOperation(self.get(\"operation\"))\n\n @property\n def dialogue_id(self) -> int:\n \"\"\"Get the dialogue_id from the message.\"\"\"\n assert self.is_set(\"dialogue_id\"), \"dialogue_id is not set.\"\n return cast(int, self.get(\"dialogue_id\"))\n\n @property\n def origin(self) -> str:\n \"\"\"Get the origin from the message.\"\"\"\n assert self.is_set(\"origin\"), \"origin is not set.\"\n return cast(str, self.get(\"origin\"))\n\n def _is_consistent(self) -> bool:\n \"\"\"Check that the data is consistent.\"\"\"\n try:\n assert isinstance(self.type, OEFMessage.Type), \"type not of correct type.\"\n assert isinstance(self.id, int), \"id must be int.\"\n if self.type == OEFMessage.Type.REGISTER_SERVICE:\n assert isinstance(\n 
self.service_description, Description\n ), \"service_description must be of type Description.\"\n assert isinstance(\n self.service_id, str\n ), \"service_id must be of type str.\"\n assert len(self.body) == 4\n elif self.type == OEFMessage.Type.REGISTER_AGENT:\n assert isinstance(\n self.agent_description, Description\n ), \"agent_description must be of type Description.\"\n assert isinstance(self.agent_id, str), \"agent_id must be of type str.\"\n assert len(self.body) == 4\n elif self.type == OEFMessage.Type.UNREGISTER_SERVICE:\n assert isinstance(\n self.service_description, Description\n ), \"service_description must be of type Description.\"\n assert isinstance(\n self.service_id, str\n ), \"service_id must be of type str.\"\n assert len(self.body) == 4\n elif self.type == OEFMessage.Type.UNREGISTER_AGENT:\n assert isinstance(\n self.agent_description, Description\n ), \"agent_description must be of type Description.\"\n assert isinstance(self.agent_id, str), \"agent_id must be of type str.\"\n assert len(self.body) == 4\n elif (\n self.type == OEFMessage.Type.SEARCH_SERVICES\n or self.type == OEFMessage.Type.SEARCH_AGENTS\n ):\n assert isinstance(self.query, Query), \"query must be of type Query.\"\n assert len(self.body) == 3\n elif self.type == OEFMessage.Type.SEARCH_RESULT:\n assert type(self.agents) == list and all(\n type(a) == str for a in self.agents\n )\n assert len(self.body) == 3\n elif self.type == OEFMessage.Type.OEF_ERROR:\n assert isinstance(\n self.operation, OEFMessage.OEFErrorOperation\n ), \"operation must be type of OEFErrorOperation\"\n assert len(self.body) == 3\n elif self.type == OEFMessage.Type.DIALOGUE_ERROR:\n assert isinstance(\n self.dialogue_id, int\n ), \"dialogue_id must be of type int.\"\n assert isinstance(self.origin, str), \"origin must be of type str.\"\n assert len(self.body) == 4\n else:\n raise ValueError(\"Type not recognized.\")\n except (AssertionError, ValueError):\n return False\n\n return True\n", "sub_path": "packages/fetchai/protocols/oef/message.py", "file_name": "message.py", "file_ext": "py", "file_size_in_byte": 7966, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "aea.protocols.base.Message", "line_number": 30, "usage_type": "name"}, {"api_name": "aea.configurations.base.PublicId", "line_number": 33, "usage_type": "call"}, {"api_name": "enum.Enum", "line_number": 35, "usage_type": "name"}, {"api_name": "enum.Enum", "line_number": 52, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 90, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 96, "usage_type": "call"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 96, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 93, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 102, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 108, "usage_type": "call"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 108, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 105, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 114, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 120, "usage_type": "call"}, {"api_name": "aea.helpers.search.models.Query", "line_number": 120, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Query", "line_number": 117, "usage_type": "name"}, {"api_name": 
"typing.cast", "line_number": 126, "usage_type": "call"}, {"api_name": "typing.List", "line_number": 126, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 123, "usage_type": "name"}, {"api_name": "typing.cast", "line_number": 138, "usage_type": "call"}, {"api_name": "typing.cast", "line_number": 144, "usage_type": "call"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 153, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 161, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 167, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Description", "line_number": 175, "usage_type": "argument"}, {"api_name": "aea.helpers.search.models.Query", "line_number": 183, "usage_type": "argument"}]} +{"seq_id": "535369577", "text": "import socket \nimport datetime\nfrom _thread import *\nimport threading\n\n\nprint (\"Program pokrenut datuma \" + str(datetime.datetime.now()))\n\n\nplock= threading.Lock()\n\ndef threaded(c):\n while True:\n \n data=c.recv(1024)\n\n if not data:\n print('Exiting')\n \n plock.release()\n break\n \n c.send(data)\n \n c.close()\n\ndef Main():\n host=\"\"\n \n s=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.bind((host,7242)) \n s.listen(5)\n print(\"Socket sluša\")\n \n while True:\n\n c,addr=s.accept()\n \n plock.acquire()\n\n print('Connected : ' , addr[0], ':' , addr[1])\n \n start_new_thread(threaded, (c,))\n s.close()\n\nif __name__=='__main__':\n Main()", "sub_path": "LabVjezba_10/Server.py", "file_name": "Server.py", "file_ext": "py", "file_size_in_byte": 792, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 7, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 7, "usage_type": "attribute"}, {"api_name": "threading.Lock", "line_number": 10, "usage_type": "call"}, {"api_name": "socket.socket", "line_number": 30, "usage_type": "call"}, {"api_name": "socket.AF_INET", "line_number": 30, "usage_type": "attribute"}, {"api_name": "socket.SOCK_STREAM", "line_number": 30, "usage_type": "attribute"}]} +{"seq_id": "522303993", "text": "\"\"\"\n ulmo.usgs.ned.core\n ~~~~~~~~~~~~~~~~~~~~~\n\n This module provides access to the `National Elevation Dataset` provided by the `United States Geological\n Survey`_ `National Map`_ system.\n\n .. _National Elevation Dataset: http://ned.usgs.gov/\n .. _United States Geological Survey: http://www.usgs.gov/\n .. _National Map: http://nationalmap.gov\n\n\"\"\"\nfrom __future__ import print_function\nfrom past.builtins import basestring\n\nfrom geojson import Feature, FeatureCollection, Polygon\nimport hashlib\nimport json\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport requests\nimport subprocess\nfrom ulmo import util\n\n\n\n# NED ftp url. 
\nNED_FTP_URL = 'ftp://rockyftp.cr.usgs.gov/vdelivery/Datasets/Staged/NED/<layer>/IMG/'\n\n# ScienceBase webservice url for IMG format NED tiles\n# https://www.sciencebase.gov/catalog/items?fields=id,title,summary,body,tags,webLinks,dates,spatial&q=&filter=tags=National Elevation Dataset (NED) 1/9 arc-second&filter=spatialQuery=Polygon ((-95.26155638325938 40.07132704825149,-94.16292357075272 40.07132704825149,-94.16292357075272 40.594749211728654,-95.26155638325938 40.594749211728654,-95.26155638325938 40.07132704825149))&format=json\nNED_WS_URL = 'https://www.sciencebase.gov/catalog/items?fields=webLinks,spatial,title&q=&filter=tags=National Elevation Dataset (NED) %s&filter=tags=IMG&filter=spatialQuery=Polygon ((%s))&format=json&max=1000'\nSCIENCEBASE_ITEM_URL = 'https://www.sciencebase.gov/catalog/item/%s?format=json'\n\n# default file path (appended to default ulmo path)\nDEFAULT_FILE_PATH = 'usgs/ned/'\n\nlayer_dict = {\n 'Alaska 2 arc-second': '2',\n '1 arc-second': '1',\n '1/3 arc-second': '13',\n '1/9 arc-second': '19',\n }\n\n# configure logging\nLOG_FORMAT = '%(message)s'\nlogging.basicConfig(format=LOG_FORMAT)\nlog = logging.getLogger(__name__)\nlog.setLevel(logging.INFO)\n\n\ndef get_available_layers():\n \"\"\"return list of available data layers\n \"\"\"\n return list(layer_dict.keys())\n\n\ndef get_raster_availability(layer, bbox):\n \"\"\"retrieve metadata for raster tiles that cover the given bounding box \n for the specified data layer. \n\n Parameters\n ----------\n layer : str\n dataset layer name. (see get_available_layers for list)\n bbox : (sequence of float|str)\n bounding box, in geographic coordinates, of the area to download tiles for, \n in the format (min longitude, min latitude, max longitude, max latitude)\n \n Returns\n -------\n metadata : geojson FeatureCollection\n returns metadata including download urls as a FeatureCollection\n \"\"\"\n xmin, ymin, xmax, ymax = [float(n) for n in bbox]\n polygon = ','.join([(repr(x) + ' ' + repr(y)) for x,y in [\n (xmin, ymax),\n (xmin, ymin), \n (xmax, ymin), \n (xmax, ymax), \n (xmin, ymax)]])\n url = NED_WS_URL % (layer, polygon)\n \n features = []\n while url:\n print('retrieving raster availability from %s' % url)\n r = requests.get(url)\n content = r.json()\n for item in content['items']:\n feature = Feature(geometry=Polygon(_bbox2poly(item['spatial']['boundingBox'])), id=item['id'], \n properties={\n 'name': item['title'], \n 'layer': layer,\n 'format': '.img',\n 'download url': [x for x in item['webLinks'] if x['type']=='download'][0]['uri']}\n )\n features.append(feature)\n\n if content.get('nextlink'):\n url = content['nextlink']['url']\n else:\n break\n\n return FeatureCollection(features)\n\n\ndef get_raster(layer, bbox, path=None, update_cache=False, \n check_modified=False, mosaic=False):\n \"\"\"downloads National Elevation Dataset raster tiles that cover the given bounding box \n for the specified data layer. \n\n Parameters\n ----------\n layer : str\n dataset layer name. (see get_available_layers for list)\n bbox : (sequence of float|str)\n bounding box, in geographic coordinates, of the area to download tiles for, \n in the format (min longitude, min latitude, max longitude, max latitude)\n path : ``None`` or path\n if ``None`` default path will be used\n update_cache: ``True`` or ``False`` (default)\n if ``False`` and output file already exists use it.\n check_modified: ``True`` or ``False`` (default)\n if tile exists in path, check if newer file exists online and download if available. 
\n mosaic: ``True`` or ``False`` (default)\n if ``True``, mosaic and clip downloaded tiles to the extents of the bbox provided. Requires\n rasterio package and GDAL.\n \n Returns\n -------\n raster_tiles : geojson FeatureCollection\n metadata as a FeatureCollection. local url of downloaded data is in feature['properties']['file']\n \"\"\"\n _check_layer(layer)\n\n raster_tiles = _download_tiles(get_raster_availability(layer, bbox), path=path, \n check_modified=check_modified)\n\n if mosaic:\n if path is None:\n path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)\n\n util.mkdir_if_doesnt_exist(os.path.join(path, 'by_boundingbox'))\n xmin, ymin, xmax, ymax = [float(n) for n in bbox]\n uid = util.generate_raster_uid(layer, xmin, ymin, xmax, ymax)\n output_path = os.path.join(path, 'by_boundingbox', uid + '.tif')\n\n if os.path.isfile(output_path) and not update_cache:\n return output_path\n\n raster_files = [tile['properties']['file'] for tile in raster_tiles['features']]\n util.mosaic_and_clip(raster_files, xmin, ymin, xmax, ymax, output_path)\n return [output_path]\n\n return raster_tiles\n\ndef _check_layer(layer):\n \"\"\"\n make sure the passed layer name is one of the handled options\n \"\"\"\n \n if layer not in get_available_layers():\n err_msg = \"The specified layer parameter ({})\".format(layer)\n err_msg += \"\\nis not in the available options:\"\n err_msg += \"\\n\\t\" + \"\\n\\t\".join(get_available_layers())\n raise ValueError(err_msg)\n\ndef _get_file_index(path=None, update_cache=False):\n \"\"\"Non webservice approach for caching file index\n\n Experimental, not currently in use.\n \"\"\"\n if path is None:\n path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)\n\n filename = os.path.join(path, 'index.json')\n\n if not os.path.exists(filename) or update_cache:\n for dirname in layer_dict.values():\n layer_path = os.path.join(path, dirname, 'zip')\n if not os.path.exists(layer_path):\n os.makedirs(layer_path)\n\n _update_file_index(filename)\n\n with open(filename) as f:\n return json.load(f)\n\n\ndef _get_tile_urls(layer, xmin, ymin, xmax, ymax, path=None):\n \"\"\"Non webservice approach to identify tile urls corresponding to the given layer and bounding box\n\n Experimental, not currently in use.\n \"\"\"\n\n base_url = NED_FTP_URL.replace('<layer>', layer_dict[layer])\n file_index = _get_file_index(path=path)\n\n if layer in ['1 arc-second', '1/3 arc-second', 'Alaska 2 arc-second']:\n lats = np.arange(np.ceil(ymin), np.ceil(ymax)+1)\n lons = np.arange(np.floor(xmin), np.floor(xmax)+1)\n files = []\n fmt_lat = lambda x: 's%0d' % np.abs(x) if x<0 else 'n%0d' % x\n fmt_lon = lambda x: 'w%03d' % np.abs(x) if x<0 else 'e%03d' % x\n fmt = '%s%s.zip'\n for lat in lats:\n for lon in lons:\n files.append(fmt % (fmt_lat(lat), fmt_lon(lon)))\n\n available_files = list(set(file_index[layer]).intersection(set(files)))\n\n urls = [base_url + filename for filename in available_files]\n return sorted(urls)\n\n if layer=='1/9 arc-second':\n raise NotImplementedError(\"1/9 arc-second NED local tile determination not implemented yet\")\n\n\ndef _update_file_index(filename):\n \"\"\" Non webservice approach for caching file index\n\n Experimental, not currently in use.\n \"\"\"\n index = {}\n for name, layer in layer_dict.items():\n print('retrieving file index for NED layer - %s' % name)\n url = NED_FTP_URL.replace('<layer>', layer)\n index[name] = sorted([line for line in util.dir_list(url) if 'zip' in line])\n \n with open(filename, 'w') as outfile: # text mode: json.dump writes str, not bytes\n json.dump(index, outfile)\n 
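# (added note) Worked example of the tile naming in _get_tile_urls above:
# for lon=-97.7, lat=30.2 on the '1 arc-second' layer, ceil(30.2) -> 31 and
# floor(-97.7) -> -98, so fmt_lat(31.0) -> 'n31' and fmt_lon(-98.0) -> 'w098',
# giving the candidate file 'n31w098.zip'.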
print('ned raster file index saved in %s' % filename)\n\n return filename\n\n\ndef _download_features(feature_ids, path=None, check_modified=False,):\n if path is None:\n path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)\n\n if isinstance(feature_ids, basestring):\n feature_ids = [feature_ids]\n\n tiles =[]\n tile_fmt = '.img'\n for feature_id in feature_ids:\n url = SCIENCEBASE_ITEM_URL % feature_id\n metadata = requests.get(url).json()\n layer = [a for a in list(layer_dict.keys()) if a in metadata['title']][0]\n layer_path = os.path.join(path, layer_dict[layer])\n tile_urls = [link['uri'] for link in metadata['webLinks'] if link['type']=='download']\n tiles.append({'feature_id': feature_id,\n 'tiles': util.download_tiles(layer_path, tile_urls, tile_fmt, check_modified),\n })\n\n return tiles\n\n\ndef _bbox2poly(bbox):\n xmin = bbox['minX']\n xmax = bbox['maxX']\n ymin = bbox['minY']\n ymax = bbox['maxY']\n\n return [[(xmin,ymin), (xmin,ymax), (xmax, ymax), (xmax, ymin), (xmin, ymin)]]\n\n\ndef _download_tiles(tiles, path=None, check_modified=False):\n\n if path is None:\n path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)\n\n for tile in tiles['features']:\n\n metadata = tile['properties']\n layer_path = os.path.join(path, layer_dict[metadata['layer']])\n tile['properties']['file'] = util.download_tiles(layer_path, metadata['download url'], metadata['format'], check_modified)[0]\n\n return tiles ", "sub_path": "ulmo/usgs/ned/core.py", "file_name": "core.py", "file_ext": "py", "file_size_in_byte": 9999, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.basicConfig", "line_number": 49, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 50, "usage_type": "call"}, {"api_name": "logging.INFO", "line_number": 51, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 89, "usage_type": "call"}, {"api_name": "geojson.Feature", "line_number": 92, "usage_type": "call"}, {"api_name": "geojson.Polygon", "line_number": 92, "usage_type": "call"}, {"api_name": "geojson.FeatureCollection", "line_number": 106, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 143, "usage_type": "call"}, {"api_name": "os.path", "line_number": 143, "usage_type": "attribute"}, {"api_name": "ulmo.util.get_ulmo_dir", "line_number": 143, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 143, "usage_type": "name"}, {"api_name": "ulmo.util.mkdir_if_doesnt_exist", "line_number": 145, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 145, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 145, "usage_type": "call"}, {"api_name": "os.path", "line_number": 145, "usage_type": "attribute"}, {"api_name": "ulmo.util.generate_raster_uid", "line_number": 147, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 147, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 150, "usage_type": "call"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "ulmo.util.mosaic_and_clip", "line_number": 154, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 154, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 176, "usage_type": "call"}, {"api_name": "os.path", "line_number": 176, "usage_type": "attribute"}, {"api_name": 
"ulmo.util.get_ulmo_dir", "line_number": 176, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 176, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 178, "usage_type": "call"}, {"api_name": "os.path", "line_number": 178, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 180, "usage_type": "call"}, {"api_name": "os.path", "line_number": 180, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 182, "usage_type": "call"}, {"api_name": "os.path", "line_number": 182, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 183, "usage_type": "call"}, {"api_name": "os.path", "line_number": 183, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 184, "usage_type": "call"}, {"api_name": "json.load", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.ceil", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.floor", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 206, "usage_type": "call"}, {"api_name": "ulmo.util.dir_list", "line_number": 230, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 230, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 233, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 241, "usage_type": "call"}, {"api_name": "os.path", "line_number": 241, "usage_type": "attribute"}, {"api_name": "ulmo.util.get_ulmo_dir", "line_number": 241, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 241, "usage_type": "name"}, {"api_name": "past.builtins.basestring", "line_number": 243, "usage_type": "argument"}, {"api_name": "requests.get", "line_number": 250, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 252, "usage_type": "call"}, {"api_name": "os.path", "line_number": 252, "usage_type": "attribute"}, {"api_name": "ulmo.util.download_tiles", "line_number": 255, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 255, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 273, "usage_type": "call"}, {"api_name": "os.path", "line_number": 273, "usage_type": "attribute"}, {"api_name": "ulmo.util.get_ulmo_dir", "line_number": 273, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 273, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 278, "usage_type": "call"}, {"api_name": "os.path", "line_number": 278, "usage_type": "attribute"}, {"api_name": "ulmo.util.download_tiles", "line_number": 279, "usage_type": "call"}, {"api_name": "ulmo.util", "line_number": 279, "usage_type": "name"}]} +{"seq_id": "481767896", "text": "\"\"\"\nScript to build mongodb from Chemspider\n\n\"\"\"\n\nfrom pymongo import MongoClient\nfrom chemspipy import ChemSpider\nimport random\nfrom time import gmtime, strftime, sleep\n\nclient = MongoClient('localhost', 27017)\ndb = client.mydb\nchemspider = db.chemspider\n\ncs = ChemSpider('0064dc77-e5cb-4e86-93da-e8aedd62baa0')\n\ncsids = list(range(30001, 40001))\nrandom.shuffle(csids)\ndoc = {}\n\nfor csid in csids:\n se = chemspider.find_one({'_id': csid})\n if se is not None:\n print('{0} has in the mongoDB'.format(str(csid)))\n continue\n compound = cs.get_compound(csid)\n try:\n doc['_id'] = int(compound.csid)\n doc['common_name'] = compound.common_name\n 
sleep(random.uniform(0.2, 1.2))\n doc['molecular_weight'] = compound.molecular_weight\n sleep(random.uniform(0, 1.2))\n doc['molecular_formula'] = compound.molecular_formula\n doc['stdinchi'] = compound.stdinchi\n sleep(random.uniform(0, 1.2))\n doc['stdinchikey'] = compound.stdinchikey\n doc['smiles'] = compound.smiles\n sleep(random.uniform(1, 1.2))\n except Exception as e:\n print(str(e) + ' Invalid ID is ' + str(compound.csid))\n with open('Invalid_ID.txt', 'a') as invalid_id:\n invalid_id.write(str(compound.csid) + '\\n')\n continue\n try:\n chemspider.insert_one(doc)\n print('{0} has been inserted'.format(str(compound.csid)))\n except Exception as e:\n print(str(e) + ' data insert error ' + str(csid) + '\\n')\n with open('mongodb_server_log.txt', 'a') as mongo_error_log:\n mongo_error_log.write(strftime(\"%Y-%m-%d\\t%H:%M:%S\" + \"---csid \" + str(doc['_id']) + ' insert error \\n', gmtime()))", "sub_path": "chemspider2mongo-chemspipy.py", "file_name": "chemspider2mongo-chemspipy.py", "file_ext": "py", "file_size_in_byte": 1700, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pymongo.MongoClient", "line_number": 11, "usage_type": "call"}, {"api_name": "chemspipy.ChemSpider", "line_number": 15, "usage_type": "call"}, {"api_name": "random.shuffle", "line_number": 18, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 30, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 30, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 32, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 32, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 35, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 35, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 38, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 38, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 50, "usage_type": "call"}, {"api_name": "time.gmtime", "line_number": 50, "usage_type": "call"}]}
+{"seq_id": "596683211", "text": "#\n# -*- coding: utf-8 -*- vim:fileencoding=utf-8:\n# Copyright © 2010-2012 Greek Research and Technology Network (GRNET S.A.)\n#\n# Permission to use, copy, modify, and/or distribute this software for any\n# purpose with or without fee is hereby granted, provided that the above\n# copyright notice and this permission notice appear in all copies.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD\n# TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND\n# FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,\n# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF\n# USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER\n# TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE\n# OF THIS SOFTWARE.\n\nfrom django.contrib import admin\nfrom ganetimgr.apply.models import *\nfrom ganetimgr.ganeti.models import Cluster\n\ndef make_fast_create_actions():\n actions = []\n for cluster in Cluster.objects.filter(fast_create=True):\n # bind the loop variable as a default argument; a plain closure would\n # make every action submit to the last cluster in the queryset\n def _submit_applications(modeladmin, request, queryset, cluster=cluster):\n for app in queryset:\n if app.status == STATUS_PENDING:\n app.approve()\n\n if app.status == STATUS_APPROVED:\n app.cluster = cluster\n app.save()\n app.submit()\n\n _submit_applications.short_description = \"Approve and submit to %s\" % \\\n cluster.description\n # Change the function name, because the admin interface relies on it\n _submit_applications.func_name = \"submit_applications_%s\" % \\\n str(cluster.slug)\n actions.append(_submit_applications)\n return actions\n\n\nclass ApplicationAdmin(admin.ModelAdmin):\n list_display = [\"hostname\", \"applicant\", \"organization\", \"cluster\",\n \"network\", \"status\", \"filed\"]\n list_filter = [\"status\", \"network\", \"organization\"]\n list_editable = [\"organization\", \"network\"]\n readonly_fields = [\"job_id\", \"backend_message\"]\n ordering = [\"-filed\", \"hostname\"]\n actions = make_fast_create_actions()\n fieldsets = [\n ('Instance Information', {'fields': ('hostname', 'memory', 'disk_size',\n 'vcpus', 'operating_system',\n 'hosts_mail_server') }),\n ('Placement', {'fields': ('network',)}),\n ('Owner Information', {'fields': ('applicant', 'organization',\n 'admin_contact_name',\n 'admin_contact_phone',\n 'admin_contact_email')}),\n ('Backend Information', {'fields': ('status', 'job_id',\n 'backend_message')})\n ]\n\nadmin.site.register(Organization)\nadmin.site.register(InstanceApplication, ApplicationAdmin)\n", "sub_path": "apply/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 2953, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "ganetimgr.ganeti.models.Cluster.objects.filter", "line_number": 23, "usage_type": "call"}, {"api_name": "ganetimgr.ganeti.models.Cluster.objects", "line_number": 23, "usage_type": "attribute"}, {"api_name": "ganetimgr.ganeti.models.Cluster", "line_number": 23, "usage_type": "name"}, {"api_name": "django.contrib.admin.ModelAdmin", "line_number": 43, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 43, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 64, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 64, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 64, "usage_type": "name"}, {"api_name": "django.contrib.admin.site.register", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.admin.site", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.contrib.admin", "line_number": 65, "usage_type": "name"}]}
+{"seq_id": "9021199", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nImplementation of a neural network classifier for binary classification of\nmovie reviews with and without pretrained word embeddings (chapter 6.1.3 of \nChollet's \"Deep learning with Python\") using Pytorch\n\nDownload GloVe from: 
https://nlp.stanford.edu/projects/glove\nand place it in the folder \"./data/GloVe\"\n\"\"\"\n\nimport torch\nfrom torchtext.datasets import IMDB\n\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n#%% import dataset\nprint(\"Load IMDB data set\")\n\nMAX_LEN = 100 # max number of tokens from each text in IMDB data set\nMAX_TOKENS = 10000 # max number of tokens in vocabulary\n\nNUM_TRAIN_SAMPLES = 200 \nNUM_VALID_SAMPLES = 10000\nDATA_SET_LEN = NUM_TRAIN_SAMPLES + NUM_VALID_SAMPLES\n\ndata_set, test_set = IMDB(root=\"./data/IMDB\",split=(\"train\",\"test\"))\n\n#%% generate vocabulary\nprint(\"Generate vocabulary\")\n\nfrom helpers import gen_vocab\nfrom torchtext.data.utils import get_tokenizer\n\ntokenizer = get_tokenizer(tokenizer=\"basic_english\")\n\nvocabulary = gen_vocab(dataset=data_set, tokenizer=tokenizer,\n max_tokens=MAX_TOKENS, max_len=MAX_LEN)\n \n#%% translate texts to index list for embedding layer\nprint(\"Preprocess data set\")\n\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.dataset import random_split\nfrom helpers import collate_emb\n\ndata_set, test_set = IMDB(root=\"./data/IMDB\",split=(\"train\",\"test\"))\ndata_set_list = list(data_set)\n\nBATCH_SIZE = 32\n\nlabel_dict = {\"neg\":0,\"pos\":1}\n\ncollate_emb_batch = lambda batch : collate_emb(batch, vocabulary, tokenizer, \n MAX_LEN, label_dict, device)\n\ntrain_set_list, split_set_list = random_split(data_set_list, \n [NUM_TRAIN_SAMPLES,\n len(data_set_list)- \n NUM_TRAIN_SAMPLES])\nvalid_set_list, _ = random_split(split_set_list, [NUM_VALID_SAMPLES,\n len(split_set_list)- \n NUM_VALID_SAMPLES])\n\ntrain_loader = DataLoader(dataset=train_set_list,batch_size=BATCH_SIZE,\n drop_last=True, shuffle=True,\n collate_fn=collate_emb_batch)\nvalid_loader = DataLoader(dataset=valid_set_list, batch_size=BATCH_SIZE,\n shuffle=True, collate_fn=collate_emb_batch)\n\n#%% Load and preprocess GloVe word-embeddings\nprint(\"Load GloVe from .txt-file\")\n\nimport os\n\nglove_dir = \"./data/GloVe/\"\nglove_txt = \"glove.6B.100d.txt\"\nemb_index = {}\n\nwith open(os.path.join(glove_dir, glove_txt), encoding=\"utf8\") as f:\n lines = f.readlines()\n\nfor line in lines:\n values = line.split()\n token = values[0]\n coefs_f = [float(val) for val in values[1:]]\n coefs = torch.tensor(coefs_f, dtype=torch.float32)\n emb_index[token] = coefs\n\n#%% Preparing GloVe word embadding matrix\nprint(\"Define embedding matrix from GloVe\")\n\nEMB_DIM = 100\nemd_mat = torch.zeros((MAX_TOKENS,EMB_DIM))\n\ntoken_list = vocabulary.__dict__[\"stoi\"].keys()\n\nfor count_token, token in enumerate(token_list):\n try:\n emd_mat[count_token, :] = emb_index[token]\n except:\n pass\n\n#%% define neural net with embedding layer\nprint(\"Define neural network with pretrained embedding layer\")\n\nfrom torch import nn\nimport torch.nn.functional as F\n\nclass SeqNet(nn.Module):\n\n def __init__(self, max_tokens, max_len, emb_dim):\n super(SeqNet,self).__init__()\n self.max_len = max_len\n self.max_tokens = max_tokens\n self.EmbLayer = nn.Embedding(num_embeddings=max_tokens,\n embedding_dim=emb_dim)\n self.flatten = nn.Flatten(start_dim=1,end_dim=-1)\n self.fcl1 = nn.Linear(in_features=max_len*emb_dim,out_features=32)\n self.fcl2 = nn.Linear(in_features=32,out_features=1)\n \n def forward(self, x):\n x = x.view(-1,self.max_len)\n x = self.EmbLayer(x)\n x = self.flatten(x)\n x = F.relu(self.fcl1(x))\n x = self.fcl2(x)\n x = torch.sigmoid(x)\n \n return x\n\nnet = SeqNet(MAX_TOKENS, MAX_LEN, EMB_DIM)\n\n#%% set embedding 
layer to GloVe\nprint(\"Set embedding layer\")\n\nnet.EmbLayer.weight = nn.Parameter(emd_mat)\nnet.EmbLayer.weight.requires_grad = False  # freeze the pretrained GloVe weights\nnet.to(device)\n\n#%% Train model\nprint(\"Train model\")\nimport torch.optim as optim\nfrom helpers import train_model\n\nNUM_EPOCHS = 10\n\noptimizer = optim.RMSprop(params=net.parameters(),lr=1e-3)\ncriterion = torch.nn.BCELoss()\n\nnet, history_dict = train_model(net, device, NUM_EPOCHS, optimizer, criterion, \n                                train_loader, valid_loader, BATCH_SIZE,\n                                classifier=\"binary\", calc_acc = True)\n\n#%% Plot results\nprint(\"Plot results\")\nfrom helpers import plot_results\n\nplot_results(history_dict)\n\n##############################################################################\n#%% -------------- Training without pretrained embedding ---------------------\n##############################################################################\n\nprint(\"Define neural network with embedding layer\")\n\nclass BinaryClassifier(nn.Module):\n\n    def __init__(self, max_tokens, max_len, emb_dim):\n        super(BinaryClassifier,self).__init__()\n        self.max_tokens = max_tokens\n        self.max_len = max_len\n        self.EmbLayer = nn.Embedding(num_embeddings=max_tokens,\n                                     embedding_dim= emb_dim)\n        self.flatten = nn.Flatten(start_dim=1,end_dim=-1)\n        self.fcl1 = nn.Linear(in_features=max_len*emb_dim,out_features=32)\n        self.fcl2 = nn.Linear(in_features=32,out_features=1)\n    \n    def forward(self, x):\n        x = x.view(-1,self.max_len)\n        x = self.EmbLayer(x)\n        x = self.flatten(x)\n        x = F.relu(self.fcl1(x))\n        x = torch.sigmoid(self.fcl2(x))\n        \n        return x\n    \n#%% Train model\nprint(\"Train model\")\nnet = BinaryClassifier(MAX_TOKENS, MAX_LEN, EMB_DIM).to(device)\noptimizer = optim.RMSprop(params=net.parameters(),lr=1e-3)\ncriterion = torch.nn.BCELoss()\n\nnet, history_dict = train_model(net, device, NUM_EPOCHS, optimizer, criterion, \n                                train_loader, valid_loader, BATCH_SIZE,\n                                classifier=\"binary\", calc_acc = True)\n\n#%% Plot results\nprint(\"Plot results\")\nfrom helpers import plot_results\n\nplot_results(history_dict)\n", "sub_path": "Chollet_6_1_IMDB_binary_classification_pretrained_word_embeddings_Pytorch.py", "file_name": "Chollet_6_1_IMDB_binary_classification_pretrained_word_embeddings_Pytorch.py", "file_ext": "py", "file_size_in_byte": 6451, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "torch.device", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda.is_available", "line_number": 14, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 14, "usage_type": "attribute"}, {"api_name": "torchtext.datasets.IMDB", "line_number": 26, "usage_type": "call"}, {"api_name": "torchtext.data.utils.get_tokenizer", "line_number": 34, "usage_type": "call"}, {"api_name": "helpers.gen_vocab", "line_number": 36, "usage_type": "call"}, {"api_name": "torchtext.datasets.IMDB", "line_number": 46, "usage_type": "call"}, {"api_name": "helpers.collate_emb", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.utils.data.dataset.random_split", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.utils.data.dataset.random_split", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 64, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 67, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": 
"torch.tensor", "line_number": 86, "usage_type": "call"}, {"api_name": "torch.float32", "line_number": 86, "usage_type": "attribute"}, {"api_name": "torch.zeros", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 109, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 115, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 117, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 117, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 118, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 119, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 119, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 125, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 125, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.nn.Parameter", "line_number": 136, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 136, "usage_type": "name"}, {"api_name": "torch.optim.RMSprop", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "attribute"}, {"api_name": "helpers.train_model", "line_number": 150, "usage_type": "call"}, {"api_name": "helpers.plot_results", "line_number": 158, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 166, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 166, "usage_type": "name"}, {"api_name": "torch.nn.Embedding", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 172, "usage_type": "name"}, {"api_name": "torch.nn.Flatten", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 174, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 175, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 176, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 176, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 182, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 182, "usage_type": "name"}, {"api_name": "torch.sigmoid", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.optim.RMSprop", "line_number": 190, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 190, "usage_type": "name"}, {"api_name": "torch.nn.BCELoss", "line_number": 191, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 191, "usage_type": "attribute"}, {"api_name": "helpers.train_model", "line_number": 193, "usage_type": "call"}, {"api_name": "helpers.plot_results", "line_number": 201, "usage_type": "call"}]} +{"seq_id": "330146747", "text": "import torch.nn as nn\nimport timm\nfrom fastai.torch_core import to_device, apply_init\nfrom fastai.basic_train import Learner\nfrom fastai.vision.models import DynamicUnet\n\n\ndef load_efficient_net(b, pretrained=True):\n m = timm.create_model('efficientnet_'+b, pretrained=pretrained)\n m = 
nn.Sequential(*list(m.children())[:-4])\n children = m[2].children()\n del m[2]\n for k, c in enumerate(children):\n m.add_module(str(k+2), c)\n return m\n\n\ndef efficientunet_learner(data, b, pretrained=True, blur_final=True,\n blur=False, self_attention=False,\n y_range=None, last_cross=True,\n bottle=False, cut=None, **learn_kwargs):\n def split_on(m):\n return (m[0][7], m[1])\n body = load_efficient_net(b, pretrained)\n try:\n size = data.train_ds[0][0].size\n except AttributeError:\n size = next(iter(data.train_dl))[0].shape[-2:]\n model = to_device(\n DynamicUnet(\n body, n_classes=data.c, img_size=size, blur=blur,\n blur_final=blur_final,\n self_attention=self_attention, y_range=y_range,\n last_cross=last_cross, bottle=bottle),\n data.device)\n learn = Learner(data, model, **learn_kwargs)\n learn.split(split_on)\n if pretrained:\n learn.freeze()\n apply_init(model[2], nn.init.kaiming_normal_)\n return learn\n", "sub_path": "src/nn/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1400, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "timm.create_model", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.nn.Sequential", "line_number": 10, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 10, "usage_type": "name"}, {"api_name": "fastai.torch_core.to_device", "line_number": 29, "usage_type": "call"}, {"api_name": "fastai.vision.models.DynamicUnet", "line_number": 30, "usage_type": "call"}, {"api_name": "fastai.basic_train.Learner", "line_number": 36, "usage_type": "call"}, {"api_name": "fastai.torch_core.apply_init", "line_number": 40, "usage_type": "call"}, {"api_name": "torch.nn.init", "line_number": 40, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 40, "usage_type": "name"}]} +{"seq_id": "358997365", "text": "# coding: UTF-8\n\nfrom django.core import urlresolvers\nfrom django.db import models\nfrom django.db.models.fields import SmallIntegerField\n\nfrom e_travel.apps.regional_branch.models import RegionalBranch\nfrom e_travel.common.fields import AwesomeAutoSlug\n\n\nclass Partners(models.Model):\n \"\"\" Партнеры\n \"\"\"\n name = models.CharField(max_length=30, verbose_name=u'Наименование')\n link = models.URLField(verbose_name=u'Ссылка на ресурс')\n\n class Meta:\n verbose_name = u'Партнер'\n verbose_name_plural = u'Партнеры'\n\n def __unicode__(self):\n return self.name\n\n\nclass Evaluation(models.Model):\n \"\"\" Отзывы\n \"\"\"\n name = models.CharField(max_length=50, verbose_name=u'Имя')\n email = models.EmailField(verbose_name=u'E-mail')\n photo = models.ImageField(upload_to='admin_uploads/evaluations/photos', blank=True,\n verbose_name=u'Ваше фото')\n creation_date = models.DateField(auto_now_add=True)\n resting_place = models.CharField(max_length=50, verbose_name=u\"Место отдыха\")\n general_evaluation = models.TextField(blank=True, verbose_name=u\"Оцените работу нашей компании\")\n manager_evaluation = models.TextField(blank=True, verbose_name=u\"Оцените работу менеджера\")\n is_approved = models.BooleanField(default=False, verbose_name=u\"Подтвердить\",\n help_text=u'Поставьте галочку, чтобы данный отзыв был виден '\n u'пользователям сайта')\n rating = SmallIntegerField(blank=True, null=True)\n\n def get_admin_url(self):\n return urlresolvers.reverse(\n \"admin:%s_%s_change\" % (self._meta.app_label, self._meta.model_name),\n args=(self.id, )\n )\n\n class Meta:\n verbose_name = u'Отзыв'\n verbose_name_plural = u'Отзывы'\n 
ordering = ['-creation_date']\n\n def __unicode__(self):\n return u'Отзыв от %s' % self.name\n\n\nclass Link(models.Model):\n \"\"\" Ссылки\n \"\"\"\n name = models.CharField(verbose_name=u'Наименование', max_length=30)\n slug = AwesomeAutoSlug(populate_from='name', overwrite=True)\n content = models.TextField(verbose_name=u'Содержание', blank=True)\n\n class Meta:\n verbose_name = u'Ссылка'\n verbose_name_plural = u'Ссылки'\n\n\nclass PageEnum(object):\n\n MAIN_PAGE = 'MP'\n COUNTRY_PAGE = 'CP'\n SEARCH_TOUR_PAGE = 'STP'\n OFFER_PAGE = 'OP'\n LEAVE_ORDER_PAGE = 'LOP'\n ABOUT_PAGE = 'AP'\n CONTACT_PAGE = 'COP'\n REST_TYPE_DESC_PAGE = 'RTDP'\n PRICE_OFFER_DESC_PAGE = 'PODP'\n COUNTRY_DESC_PAGE = 'CDP'\n RESORT_DESC_PAGE = 'RDP'\n HOTEL_DESC_PAGE = 'HDP'\n SPEC_OFFER_DESC_PAGE = 'SODP'\n UNIVERSAL_PAGE = 'UP'\n\n choices = (\n (UNIVERSAL_PAGE, u'Универсальный модуль. (виден на всех страницах)'),\n (MAIN_PAGE, u'Главная страница'),\n (COUNTRY_PAGE, u'Страница - \"Страны\"'),\n (SEARCH_TOUR_PAGE, u'Страница - \"Поиск Туров\"'),\n (OFFER_PAGE, u'Страница - \"Акции\"'),\n (LEAVE_ORDER_PAGE, u'Страница - \"Оставить заявку\"'),\n (ABOUT_PAGE, u'Страница - \"О нас\"'),\n (CONTACT_PAGE, u'Страница - \"Контакты\"'),\n (REST_TYPE_DESC_PAGE, u'Страница с описанием \"типов предложений\"'),\n (COUNTRY_DESC_PAGE, u'Страница с описанием - \"стран\"'),\n (RESORT_DESC_PAGE, u'Страница с описанием - \"курортов\"'),\n (HOTEL_DESC_PAGE, u'Страница с описанием - \"отелей\"'),\n (SPEC_OFFER_DESC_PAGE, u'Страница с описанием - \"спец. предл-ий\"'),\n (PRICE_OFFER_DESC_PAGE, u'Страница с описанием - \"ценовых предложений\"')\n )\n\n templates = {\n 'main/main.html': MAIN_PAGE,\n 'main/countries.html': CONTACT_PAGE,\n 'main/poisk.html': SEARCH_TOUR_PAGE,\n 'offers/offers.html': OFFER_PAGE,\n 'main/tour-order.html': LEAVE_ORDER_PAGE,\n 'main/about.html': ABOUT_PAGE,\n 'main/contacts.html': CONTACT_PAGE,\n 'special_offers/resttype_detail.html': REST_TYPE_DESC_PAGE,\n 'special_offers/country_detail.html': COUNTRY_DESC_PAGE,\n 'special_offers/resort_detail.html': RESORT_DESC_PAGE,\n 'special_offers/hotel_detail.html': HOTEL_DESC_PAGE,\n 'special_offers/specialoffer_detail.html': SPEC_OFFER_DESC_PAGE,\n 'special_offers/priceoffer_detail.html': PRICE_OFFER_DESC_PAGE\n }\n\n view_names = {\n MAIN_PAGE: 'main_main_page',\n CONTACT_PAGE:'main_contact_page',\n SEARCH_TOUR_PAGE: 'main_poisk_page',\n LEAVE_ORDER_PAGE: 'main_order_tour_page',\n ABOUT_PAGE: 'main_about_page',\n COUNTRY_PAGE: 'main_countries_page',\n OFFER_PAGE: 'offers_offers_page',\n REST_TYPE_DESC_PAGE: 'get_rest_type',\n COUNTRY_DESC_PAGE: 'get_country',\n RESORT_DESC_PAGE: 'get_resort',\n HOTEL_DESC_PAGE: 'get_hotel',\n SPEC_OFFER_DESC_PAGE: 'get_spec_offer',\n PRICE_OFFER_DESC_PAGE: 'get_price_offer'\n }\n\n @classmethod\n def get_verbose_name_by_key(cls, key):\n return dict(cls.choices).get(key, key)\n\n\nclass ExtraModule(models.Model):\n \"\"\" Доп. модули\n \"\"\"\n name = models.CharField(verbose_name=u'Наименование', max_length=30,\n help_text=u'Укажите имя модуля, поясняющее его назначение. 
'\n u'Например - google analytics')\n templates = models.CharField(verbose_name=u'Целевая страница', choices=PageEnum.choices,\n max_length=5)\n branches = models.ManyToManyField(RegionalBranch, verbose_name=u'Региональное отделение')\n code = models.TextField(verbose_name=u'Код')\n active = models.BooleanField(verbose_name=u'Активность', default=True)\n\n class Meta:\n verbose_name = u'Внешний модуль'\n verbose_name_plural = u'Внешние модули'\n", "sub_path": "e_travel/apps/main/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 6486, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.models.Model", "line_number": 11, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 11, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.URLField", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 28, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 28, "usage_type": "name"}, {"api_name": "django.db.models.EmailField", "line_number": 29, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 29, "usage_type": "name"}, {"api_name": "django.db.models.ImageField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.fields.SmallIntegerField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 42, "usage_type": "call"}, {"api_name": "django.core.urlresolvers", "line_number": 42, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 56, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 56, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 59, "usage_type": "name"}, {"api_name": "e_travel.common.fields.AwesomeAutoSlug", "line_number": 60, "usage_type": "call"}, {"api_name": "django.db.models.TextField", "line_number": 61, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 61, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 139, "usage_type": "attribute"}, {"api_name": 
"django.db.models", "line_number": 139, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 142, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 142, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 145, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 145, "usage_type": "name"}, {"api_name": "django.db.models.ManyToManyField", "line_number": 147, "usage_type": "call"}, {"api_name": "e_travel.apps.regional_branch.models.RegionalBranch", "line_number": 147, "usage_type": "argument"}, {"api_name": "django.db.models", "line_number": 147, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 148, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 148, "usage_type": "name"}, {"api_name": "django.db.models.BooleanField", "line_number": 149, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 149, "usage_type": "name"}]} +{"seq_id": "590560405", "text": "from distantbes.proto.proto import (\n build_event_stream_pb2 as bes,\n build_events_pb2 as be,\n publish_build_event_pb2 as pbe, \n )\nfrom google.protobuf.timestamp_pb2 import Timestamp\nimport uuid\n\ndef dict_to_workspace_status(ws_dict):\n ws = bes.WorkspaceStatus()\n\n for key, value in ws_dict.items():\n if value is not None:\n wsi = bes.WorkspaceStatus.Item(key=key, value=value)\n ws.item.append(wsi)\n\n return ws\n\ndef get_current_timestamp():\n now = Timestamp()\n now.GetCurrentTime()\n return now.ToMilliseconds()\n\ndef generate_last_message(sequence_number, stream_id):\n build_event = be.BuildEvent()\n finish = be.BuildEvent.BuildComponentStreamFinished(type=1)\n build_event.component_stream_finished.CopyFrom(finish)\n\n obe = pbe.OrderedBuildEvent()\n obe.sequence_number = sequence_number\n obe.event.CopyFrom(build_event)\n obe.stream_id.CopyFrom(stream_id)\n\n pbtesr = pbe.PublishBuildToolEventStreamRequest()\n pbtesr.ordered_build_event.CopyFrom(obe)\n\n return pbtesr\n\ndef generate_stream_id(invocation_id=None, build_id=None):\n if invocation_id is None:\n invocation_id = str(uuid.uuid4())\n\n if build_id is None:\n build_id = str(uuid.uuid4())\n\n sid = be.StreamId()\n sid.invocation_id = invocation_id\n sid.build_id = build_id\n sid.component = 3\n \n return sid\n\ndef pack_bazel_event(sequence_number, stream_id, bazel_event, project_id=\"distant-bes\"):\n build_event = be.BuildEvent()\n build_event.bazel_event.Pack(bazel_event)\n\n build_event.event_time.GetCurrentTime()\n\n obe = pbe.OrderedBuildEvent()\n obe.sequence_number = sequence_number\n obe.event.CopyFrom(build_event)\n obe.stream_id.CopyFrom(stream_id)\n\n pbtesr = pbe.PublishBuildToolEventStreamRequest()\n pbtesr.ordered_build_event.CopyFrom(obe)\n \n pbtesr.project_id = project_id\n\n return pbtesr\n", "sub_path": "distantbes/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1921, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "distantbes.proto.proto.build_event_stream_pb2.WorkspaceStatus", "line_number": 10, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_event_stream_pb2", "line_number": 10, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.build_event_stream_pb2.WorkspaceStatus.Item", "line_number": 14, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_event_stream_pb2.WorkspaceStatus", "line_number": 14, "usage_type": "attribute"}, {"api_name": 
"distantbes.proto.proto.build_event_stream_pb2", "line_number": 14, "usage_type": "name"}, {"api_name": "google.protobuf.timestamp_pb2.Timestamp", "line_number": 20, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2.BuildEvent", "line_number": 25, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2", "line_number": 25, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.build_events_pb2.BuildEvent.BuildComponentStreamFinished", "line_number": 26, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2.BuildEvent", "line_number": 26, "usage_type": "attribute"}, {"api_name": "distantbes.proto.proto.build_events_pb2", "line_number": 26, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2.OrderedBuildEvent", "line_number": 29, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2", "line_number": 29, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2.PublishBuildToolEventStreamRequest", "line_number": 34, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2", "line_number": 34, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 41, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 44, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2.StreamId", "line_number": 46, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2", "line_number": 46, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.build_events_pb2.BuildEvent", "line_number": 54, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.build_events_pb2", "line_number": 54, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2.OrderedBuildEvent", "line_number": 59, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2", "line_number": 59, "usage_type": "name"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2.PublishBuildToolEventStreamRequest", "line_number": 64, "usage_type": "call"}, {"api_name": "distantbes.proto.proto.publish_build_event_pb2", "line_number": 64, "usage_type": "name"}]} +{"seq_id": "612291202", "text": "# Import libraries\r\nimport pandas as pd\r\nimport pymysql\r\nfrom pip._vendor.distlib.compat import raw_input\r\nfrom pymongo import MongoClient\r\nimport pymongo\r\nfrom decimal import Decimal\r\nimport numpy as np\r\nfrom datetime import datetime, timedelta\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass datetimedays:\r\n def __init__(self):\r\n last_day_one = datetime.now() - timedelta(days=1)\r\n last_day_three = datetime.now() - timedelta(days=3)\r\n last_day_two = datetime.now() - timedelta(days=2)\r\n last_day_four = datetime.now() - timedelta(days=4)\r\n last_day_five = datetime.now() - timedelta(days=5)\r\n last_day_six = datetime.now() - timedelta(days=6)\r\n last_day_seven = datetime.now() - timedelta(days=7)\r\n last_day_eight = datetime.now() - timedelta(days=8)\r\n last_day_nine = datetime.now() - timedelta(days=9)\r\n last_day_ten = datetime.now() - timedelta(days=10)\r\n last_eleven = datetime.now() - timedelta(days=11)\r\n self.one_day_ago = last_day_one.strftime(\"%Y-%m-%d %H:%M\")\r\n self.two_days_ago = last_day_two.strftime(\"%Y-%m-%d %H:%M\")\r\n self.three_days_ago = last_day_three.strftime(\"%Y-%m-%d %H:%M\")\r\n self.four_dyas_ago = last_day_four.strftime(\"%Y-%m-%d %H:%M\")\r\n self.fife_days_ago = 
last_day_five.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.six_days_ago = last_day_six.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.seven_days_ago = last_day_seven.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.eight_days_ago = last_day_eight.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.nine_days_ago = last_day_nine.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.ten_days_ago = last_day_ten.strftime(\"%Y-%m-%d %H:%M\")\r\n        self.eleven_days_ago = last_eleven.strftime(\"%Y-%m-%d %H:%M\")\r\n\r\n        ################################# MongoDB result ##################################\r\n        # Connection to the MongoDB Server\r\n        mongoClient = MongoClient(\"mongodb://localhost:27017\")\r\n        # Connection to the database\r\n        db = mongoClient[\"petness\"]\r\n        # Collection\r\n        collection = db[\"dog\"]\r\n        self.collection = collection\r\n        today = datetime.now().strftime(\"%Y-%m-%d %H:%M\")\r\n        detailsrowtoday = collection.find({'date': {'$gte': self.one_day_ago, '$lte': today}})  # fetch the last 24 hours of data\r\n        dftoday = pd.DataFrame(list(detailsrowtoday))\r\n        gtaday = dftoday.groupby(\"activity\")\r\n        # activity codes: 0 = running, 1 = walking, 2 = standing, 3 = resting;\r\n        # each sample covers 5 seconds, so count * 5 / 60 converts to minutes\r\n        try:\r\n            self.resttoday = gtaday.get_group(3).count() * 5 / 60\r\n        except KeyError:\r\n            self.resttoday = 0\r\n        try:\r\n            self.standtoday = gtaday.get_group(2).count() * 5 / 60\r\n        except KeyError:\r\n            self.standtoday = 0\r\n        try:\r\n            self.runningtoday = gtaday.get_group(0).count() * 5 / 60\r\n        except KeyError:\r\n            self.runningtoday = 0\r\n        try:\r\n            self.walkingtoday = gtaday.get_group(1).count() * 5 / 60\r\n        except KeyError:\r\n            self.walkingtoday = 
0\r\n\r\n\r\n\r\n################################# K days functions###################################################\r\n def twodaysrest(self):\r\n detailsrow1 = self.collection.find({'date': {'$gte': self.two_days_ago, '$lte': self.one_day_ago}})\r\n detailsrow2 = self.collection.find({'date': {'$gte': self.three_days_ago, '$lte': self.two_days_ago}})\r\n\r\n df = pd.DataFrame(list(detailsrow1)) # one day ago\r\n df2 = pd.DataFrame(list(detailsrow2))\r\n\r\n g = df.groupby(\"activity\") # one day ago\r\n g2 = df2.groupby(\"activity\")\r\n\r\n krest = g.get_group(3).count() * 5 # get activity belog to 0,1,2,3\r\n kstand = g.get_group(2).count() * 5 # get activity belog to 0,1,2,3\r\n kruning = g.get_group(0).count() * 5 # get activity belog to 0,1,2,3\r\n kwalking = g.get_group(1).count() * 5 # get activity belog to 0,1,2,3\r\n\r\n kresttwo = g2.get_group(3).count() * 5 # get activity belog to 0,1,2,3\r\n kstandtwo = g2.get_group(2).count() * 5 # get activity belog to 0,1,2,3\r\n kruningtwo = g2.get_group(0).count() * 5 # get activity belog to 0,1,2,3\r\n kwalkingtwo = g2.get_group(1).count() * 5 # get activity belog to 0,1,2,3\r\n\r\n\r\n\r\n totalrest = (pd.to_numeric(((self.resttoday / ((krest + kresttwo) / 2)) * 100).loc['activity']))\r\n totalstand = (pd.to_numeric(((self.standtoday / ((kstand + kstandtwo) / 2)) * 100).loc['activity']))\r\n totalrunning = (pd.to_numeric(((self.runningtoday / ((kruning + kruningtwo) / 2)) * 100).loc['activity']))\r\n totalwalking = (pd.to_numeric(((self.walkingtoday / ((kwalking + kwalkingtwo) / 2)) * 100).loc['activity']))\r\n\r\n #calculation = pd.to_numeric(totalrest.loc['activity'])\r\n print(kstand)\r\n print(kstandtwo)\r\n\r\n\r\n N = 4\r\n\r\n yesterday = (totalrest, totalstand, totalrunning, totalwalking)\r\n today = (1, 1, 1, 1)\r\n menStd = (3, 3, 3, 3)\r\n womenStd = (3, 3, 3, 3)\r\n ind = np.arange(N) # the x locations for the groups\r\n width = 0.65 # the width of the bars: can also be len(x) sequence\r\n\r\n p1 = plt.bar(ind, today, width, yerr=menStd)\r\n p2 = plt.bar(ind, yesterday, width,\r\n bottom=today, yerr=womenStd)\r\n\r\n plt.ylabel('Scores')\r\n plt.title('Scores by group and gender')\r\n plt.xticks(ind, ('Resting', 'Standing', 'Running', 'Walking'))\r\n plt.yticks(np.arange(0, 200, 20))\r\n plt.legend((p1[0], p2[0]), ('Today', '2K_days'))\r\n\r\n plt.show()\r\n\r\n\r\n # plt.plot([totalrest, totalstand, totalrunning, totalwalking])\r\n #plt.plot([1,-2,3,4,-5])\r\n #plt.ylabel('action %')\r\n #plt.show()\r\n df2.plot.bar();\r\n\r\n def threedaysresult(self):\r\n\r\n detailsrow1 = self.collection.find({'date': {\"$regex\": self.one_day_ago}})\r\n detailsrow2 = self.collection.find({'date': {\"$regex\": self.two_days_ago}})\r\n detailsrow3 = self.collection.find({'date': {\"$regex\": self. 
three_days_ago}})\r\n\r\n        df = pd.DataFrame(list(detailsrow1)) # one day ago\r\n        df2 = pd.DataFrame(list(detailsrow2))\r\n        df3 = pd.DataFrame(list(detailsrow3))\r\n\r\n        g = df.groupby(\"activity\") # one day ago\r\n        g2 = df2.groupby(\"activity\")\r\n        g3 = df3.groupby(\"activity\")\r\n\r\n        # activity codes: 0 = running, 1 = walking, 2 = standing, 3 = resting\r\n        krest = g.get_group(3).count() * 5\r\n        kstand = g.get_group(2).count() * 5\r\n        kruning = g.get_group(0).count() * 5\r\n        kwalking = g.get_group(1).count() * 5\r\n\r\n        kresttwo = g2.get_group(3).count() * 5\r\n        kstandtwo = g2.get_group(2).count() * 5\r\n        kruningtwo = g2.get_group(0).count() * 5\r\n        kwalkingtwo = g2.get_group(1).count() * 5\r\n\r\n        krestthree = g3.get_group(3).count() * 5\r\n        kstandthree = g3.get_group(2).count() * 5\r\n        kruningtthree = g3.get_group(0).count() * 5\r\n        kwalkingthree = g3.get_group(1).count() * 5\r\n\r\n        # deviation of today from the three-day average, per activity\r\n        totalrest = (pd.to_numeric((100 - ((self.resttoday / ((krest + kresttwo + krestthree) / 3)) * 100)).loc['activity']))\r\n        totalstand = (pd.to_numeric((100 - ((self.standtoday / ((kstand + kstandtwo + kstandthree) / 3)) * 100)).loc['activity']))\r\n        totalrunning = (pd.to_numeric((100 - ((self.runningtoday / ((kruning + kruningtwo + kruningtthree) / 3)) * 100)).loc['activity']))\r\n        totalwalking = (pd.to_numeric((100 - ((self.walkingtoday / ((kwalking + kwalkingtwo + kwalkingthree) / 3)) * 100)).loc['activity']))\r\n\r\n        # calculation = pd.to_numeric(totalrest.loc['activity'])\r\n        #print(krest)\r\n        #print(kresttwo)\r\n        #print(krestthree)\r\n\r\n        N = 4\r\n\r\n        yesterday = (totalrest, totalstand, totalrunning, totalwalking)\r\n        today = (100, 100, 100, 100)\r\n        menStd = (3, 3, 3, 3)\r\n        womenStd = (3, 3, 3, 3)\r\n        ind = np.arange(N) # the x locations for the groups\r\n        width = 0.65 # the width of the bars: can also be len(x) sequence\r\n\r\n        p1 = plt.bar(ind, today, width, yerr=menStd)\r\n        p2 = plt.bar(ind, yesterday, width,\r\n                    bottom=today, yerr=womenStd)\r\n\r\n        plt.ylabel('Scores')\r\n        plt.title('Scores by activity')\r\n        plt.xticks(ind, ('Resting', 'Standing', 'Running', 'Walking'))\r\n        plt.yticks(np.arange(0, 200, 20))\r\n        plt.legend((p1[0], p2[0]), ('Today', '3K_days'))\r\n\r\n        plt.show()\r\n\r\n        # plt.plot([totalrest, totalstand, totalrunning, totalwalking])\r\n        # plt.plot([1,-2,3,4,-5])\r\n        # plt.ylabel('action %')\r\n        # plt.show()\r\n        df2.plot.bar();\r\n\r\n\r\n\r\n#######################################main function###################################################\r\n    def main(self, choselist):\r\n        detailsrow1 = self.collection.find( {'date': {'$gte': self.two_days_ago, '$lte': self.one_day_ago}})\r\n        detailsrow2 = self.collection.find({'date': {'$gte': self.three_days_ago, '$lte': self.two_days_ago}})\r\n        detailsrow3 = self.collection.find({'date': {'$gte': self.four_dyas_ago, '$lte': self.three_days_ago}})\r\n        detailsrow4 = self.collection.find({'date': {'$gte': self.fife_days_ago, '$lte': self.four_dyas_ago}})\r\n        detailsrow5 = self.collection.find({'date': {'$gte': self.six_days_ago, '$lte': self.fife_days_ago}})\r\n        detailsrow6 = self.collection.find({'date': {'$gte': self.seven_days_ago, '$lte': self.six_days_ago}})\r\n        detailsrow7 = self.collection.find({'date': {'$gte': 
self.eight_days_ago, '$lte': self.seven_days_ago}})\r\n detailsrow8 = self.collection.find({'date': {'$gte': self.nine_days_ago, '$lte': self.eight_days_ago}})\r\n detailsrow9 = self.collection.find({'date': {'$gte': self.ten_days_ago, '$lte': self.nine_days_ago}})\r\n detailsrow10 = self.collection.find({'date': {'$gte': self.eleven_days_ago, '$lte': self.ten_days_ago}})\r\n\r\n\r\n df = pd.DataFrame(list(detailsrow1)) # one day ago\r\n df2 = pd.DataFrame(list(detailsrow2))\r\n df3 = pd.DataFrame(list(detailsrow3))\r\n df4 = pd.DataFrame(list(detailsrow4))\r\n df5 = pd.DataFrame(list(detailsrow5))\r\n df6 = pd.DataFrame(list(detailsrow6))\r\n df7 = pd.DataFrame(list(detailsrow7))\r\n df8 = pd.DataFrame(list(detailsrow8))\r\n df9 = pd.DataFrame(list(detailsrow9))\r\n df10 = pd.DataFrame(list(detailsrow10))\r\n\r\n\r\n g = df.groupby(\"activity\") # one day ago\r\n g2 = df2.groupby(\"activity\")\r\n g3 = df3.groupby(\"activity\")\r\n g4 = df4.groupby(\"activity\") # one day ago\r\n g5 = df5.groupby(\"activity\")\r\n g6 = df6.groupby(\"activity\")\r\n try:\r\n g7 = df7.groupby(\"activity\") # one day ago\r\n except:\r\n g7 = 0\r\n try:\r\n g8 = df8.groupby(\"activity\")\r\n except:\r\n g8 = 0\r\n g9 = df9.groupby(\"activity\")\r\n g10 = df10.groupby(\"activity\")\r\n #print(g9)\r\n\r\n ###################### day 1 count action ##############\r\n try:\r\n krest = g.get_group(3).count()* 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n krest = 0\r\n try:\r\n kstand = g.get_group(2).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kstand = 0\r\n try:\r\n kruning = g.get_group(0).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kruning = 0\r\n try:\r\n kwalking = g.get_group(1).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kwalking = 0\r\n\r\n ###################### day 2 count action ################\r\n try:\r\n kresttwo = g2.get_group(3).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kresttwo = 0\r\n try:\r\n kstandtwo = g2.get_group(2).count() * 5 / 60# get activity belog to 0,1,2,3\r\n except:\r\n kstandtwo = 0\r\n try:\r\n kruningtwo = g2.get_group(0).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kruningtwo = 0\r\n try:\r\n kwalkingtwo = g2.get_group(1).count() * 5 / 60# get activity belog to 0,1,2,3\r\n except:\r\n kwalkingtwo = 0\r\n\r\n ################### day 3 count action ###################\r\n try:\r\n krestthree = g3.get_group(3).count() * 5 / 60# get activity belog to 0,1,2,3\r\n except:\r\n krestthree = 0\r\n try:\r\n kstandthree = g3.get_group(2).count() * 5 / 60# get activity belog to 0,1,2,3\r\n except:\r\n kstandthree = 0\r\n try:\r\n kruningtthree = g3.get_group(0).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kruningtthree = 0\r\n try:\r\n kwalkingthree = g3.get_group(1).count() * 5 / 60 # get activity belog to 0,1,2,3\r\n except:\r\n kwalkingthree = 0\r\n\r\n ################### day 4 count action ###################\r\n try:\r\n krest4 = g4.get_group(3).count() * 5 / 60\r\n except:\r\n krest4 = 0\r\n try:\r\n kstand4 = g4.get_group(2).count() * 5 / 60\r\n except:\r\n kstand4 = 0\r\n try:\r\n kruning4 = g4.get_group(0).count() * 5 / 60\r\n except:\r\n kruning4 = 0\r\n try:\r\n kwalking4 = g4.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking4 = 0\r\n\r\n ################### day 5 count action ###################\r\n try:\r\n krest5 = g5.get_group(3).count() * 5 / 60\r\n except:\r\n krest5 = 0\r\n try:\r\n kstand5 = g5.get_group(2).count() * 5 / 60\r\n 
except:\r\n kstand5 = 0\r\n try:\r\n kruning5 = g5.get_group(0).count() * 5 / 60\r\n except:\r\n kruning5 = 0\r\n try:\r\n kwalking5 = g5.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking5 = 0\r\n\r\n ################### day 6 count action ###################\r\n try:\r\n krest6 = g6.get_group(3).count() * 5 / 60\r\n except:\r\n krest6 = 0\r\n try:\r\n kstand6 = g6.get_group(2).count() * 5 / 60\r\n except:\r\n kstand6 = 0\r\n try:\r\n kruning6 = g6.get_group(0).count() * 5 / 60\r\n except:\r\n kruning6 = 0\r\n try:\r\n kwalking6 = g6.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking6 = 0\r\n\r\n ################### day 7 count action ###################\r\n try:\r\n krest7 = g7.get_group(3).count() * 5 / 60\r\n except:\r\n krest7 = 0\r\n try:\r\n kstand7 = g7.get_group(2).count() * 5 / 60\r\n except:\r\n kstand7 = 0\r\n try:\r\n kruning7 = g7.get_group(0).count() * 5 / 60\r\n except:\r\n kruning7 = 0\r\n try:\r\n kwalking7 = g7.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking7 = 0\r\n\r\n ################### day 8 count action ###################\r\n try:\r\n krest8 = g8.get_group(3).count() * 5 / 60\r\n except:\r\n krest8 = 0\r\n try:\r\n kstand8 = g8.get_group(2).count() * 5 / 60\r\n except:\r\n kstand8 = 0\r\n try:\r\n kruning8 = g8.get_group(0).count() * 5 / 60\r\n except:\r\n kruning8 = 0\r\n try:\r\n kwalking8 = g8.get_group(1).count() * 5 / 60# Atention DATA POINT CHANGED HERE FOR HACK DATA MANIPULATE\r\n except:\r\n kwalking8 = 0\r\n\r\n ################### day 9 count action ###################\r\n try:\r\n krest9 = g9.get_group(3).count() * 5 / 60\r\n except:\r\n krest9 = 0\r\n try:\r\n kstand9 = g9.get_group(2).count() * 5 / 60\r\n except:\r\n kstand9 = 0\r\n try:\r\n kruning9 = g9.get_group(0).count() * 5 / 60\r\n except:\r\n kruning9 = 0\r\n try:\r\n kwalking9 = g9.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking9 = 0\r\n\r\n ################### day 10 count action ###################\r\n try:\r\n krest10 = g10.get_group(3).count() * 5 / 60\r\n except:\r\n krest10 = 0\r\n try:\r\n kstand10 = g10.get_group(2).count() * 5 / 60\r\n except:\r\n kstand10 = 0\r\n try:\r\n kruning10 = g10.get_group(0).count() * 5 / 60\r\n except:\r\n kruning10 = 0\r\n try:\r\n kwalking10 = g10.get_group(1).count() * 5 / 60\r\n except:\r\n kwalking10 = 0\r\n\r\n\r\n\r\n restlist = [krest, kresttwo, krestthree, krest4, krest5, krest6, krest7, krest8, krest9, krest10]\r\n standlist = [kstand, kstandtwo, kstandthree, kstand4, kstand5, kstand6, kstand7, kstand8, kstand9, kstand10]\r\n runninglist = [kruning, kruningtwo, kruningtthree, kruning4, kruning5, kruning6, kruning7, kruning8, kruning9, kruning10]\r\n walkinglist = [kwalking, kwalkingtwo, kwalkingthree, kwalking4, kwalking5, kwalking6, kwalking7, kwalking8, kwalking9, kwalking10]\r\n\r\n chum = (krest + kresttwo + krestthree)/3\r\n totalrestlist = np.mean(restlist[0: choselist])\r\n totalstandinglist = np.mean(standlist[0: choselist])\r\n totalrunninglist = np.mean(runninglist[0: choselist])\r\n totalwalkinglist = np.mean(walkinglist[0: choselist])\r\n\r\n #print(kstand)\r\n #print(kstandtwo)\r\n #print(kstandthree)\r\n persentrest = (pd.to_numeric(((self.resttoday / (totalrestlist)) * 100)).loc['activity'])\r\n persentstand = (pd.to_numeric(((self.standtoday / (totalstandinglist)) * 100)).loc['activity'])\r\n persentrunning = (pd.to_numeric(((self.runningtoday / (totalrunninglist)) * 100)).loc['activity'])\r\n persentwalking = (pd.to_numeric(((self.walkingtoday / (totalwalkinglist)) * 100)).loc['activity'])\r\n\r\n 
coll_persent_of_kdays_actions = (persentrest, persentstand, persentrunning, persentwalking)\r\n        summarize = sum(coll_persent_of_kdays_actions)\r\n        maxvalue = max(persentrest, persentstand, persentrunning, persentwalking)\r\n        Difference_Behavior_percent = ((maxvalue / summarize) * 100)\r\n        # a single activity dominating the day is treated as a warning sign\r\n        if Difference_Behavior_percent < 50:\r\n            print(\"Health Good\")\r\n        elif Difference_Behavior_percent < 90:\r\n            print(\"Health Warning\")\r\n        else:\r\n            print(\"Health Bad\")\r\n\r\n\r\n        # calculation = pd.to_numeric(totalrest.loc['activity'])\r\n        #print(self.resttoday)\r\n        print(persentrest)\r\n        print(persentstand)\r\n        print(persentrunning)\r\n        #print(\"________________\")\r\n        #print(self.standtoday)\r\n        print(persentwalking)\r\n        print(kstand8)\r\n        print(kruning7)\r\n        #print(\"________________\")\r\n        #print(chum)\r\n        #print(totalrestlist)\r\n\r\n\r\n        N = 4\r\n\r\n        yesterday = (persentrest, persentstand, persentrunning, persentwalking)\r\n        today = (1, 1, 1, 1)\r\n        menStd = (3, 3, 3, 3)\r\n        womenStd = (3, 3, 3, 3)\r\n        ind = np.arange(N) # the x locations for the groups\r\n        width = 0.65 # the width of the bars: can also be len(x) sequence\r\n\r\n        p1 = plt.bar(ind, today, width, yerr=menStd)\r\n        p2 = plt.bar(ind, yesterday, width,\r\n                    bottom=today, yerr=womenStd)\r\n\r\n        plt.ylabel('Scores')\r\n        plt.title('Scores by group days')\r\n        plt.xticks(ind, ('Resting', 'Standing', 'Running', 'Walking'))\r\n        plt.yticks(np.arange(0, 200, 20))\r\n        plt.legend((p1[0], p2[0]), ('From 100%', ''+str(choselist)+'K days'))\r\n\r\n        plt.show()\r\n\r\n        # plt.plot([totalrest, totalstand, totalrunning, totalwalking])\r\n        # plt.plot([1,-2,3,4,-5])\r\n        # plt.ylabel('action %')\r\n        # plt.show()\r\n        df2.plot.bar();\r\n\r\n\r\n\r\n# analyze = datetimedays()\r\n\r\n\r\n# Run = float(run)/3600\r\n# time = pd.date_range(run, periods=2018, freq='5min')\r\n# series = pd.Series(np.random.randint(100, size=2018), index=time)\r\n# print(row)\r\n\r\nif __name__ == '__main__':\r\n    analyze = datetimedays()\r\n    #analyze.twodaysrest()\r\n    #analyze.threedaysresult()\r\n    analyze.main(2)\r\n\r\n\r\n\r\n    #analyze.twodaysrest()\r\n    # chumchuq = 100-((analyze.main() /analyze.twodays( ))*100)\r\n    # print(chumchuq)\r\n\r\n\r\n\r\n", "sub_path": "sim_hl_al.py", "file_name": "sim_hl_al.py", "file_ext": "py", "file_size_in_byte": 22875, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 15, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 15, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 17, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": 
"datetime.timedelta", "line_number": 19, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 20, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 20, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 21, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 21, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 23, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 24, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 24, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 25, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 25, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 25, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 40, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 46, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 46, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 68, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 68, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 91, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 117, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 118, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 135, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 136, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 137, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 138, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 151, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 155, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 155, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 161, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 161, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 162, 
"usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 162, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 164, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 164, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 179, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 180, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 181, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 202, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 203, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 204, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 205, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 218, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 221, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 221, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 222, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 222, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 225, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 225, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 226, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 226, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 227, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 227, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 228, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 228, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 229, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 229, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 231, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 231, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 255, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 256, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 257, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 258, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 259, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 260, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 261, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 262, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 263, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 264, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 473, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 474, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 475, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 476, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 481, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 482, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 483, "usage_type": "call"}, {"api_name": "pandas.to_numeric", "line_number": 484, 
"usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 519, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 522, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 522, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 523, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 523, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 526, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 526, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 527, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 527, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 528, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 528, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.yticks", "line_number": 529, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 529, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 529, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.legend", "line_number": 530, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 530, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 532, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 532, "usage_type": "name"}]} +{"seq_id": "351680581", "text": "\"\"\"\nProgram for simulering av landing\n\nAll kode er egenskrevet\n\"\"\"\n\nimport numpy as np\nimport sys, os\nimport time as tim\nimport matplotlib.pyplot as plt\nfrom scipy.integrate import solve_ivp\n\nsys.path.append(os.path.abspath(\"../1_the_Rocket_Engine\"))\nsys.path.append(os.path.abspath(\"../2_Planetary_Orbits\"))\nsys.path.append(os.path.abspath(\"../3_Habitable_zone\"))\nsys.path.append(os.path.abspath(\"../4_Onboard_Orientation_Software\"))\nsys.path.append(os.path.abspath(\"../5_Satellite_Launch\"))\nsys.path.append(os.path.abspath(\"../6_Preparing_for_Landing\"))\n\nimport launch\nfrom orbits import SolarSys\nfrom navigate import navigate\nfrom journey import Rocket\nfrom atmosphere import density\n\nimport ast2000tools.utils as util\nimport ast2000tools.constants as const\nfrom ast2000tools.space_mission import SpaceMission\nfrom ast2000tools.solar_system import SolarSystem\nfrom ast2000tools.shortcuts import SpaceMissionShortcuts as SMS\n\nfont = {\"family\": \"normal\", \"weight\": \"normal\", \"size\": 22}\n\nplt.rc(\"font\", **font)\n\n\nclass Landing:\n \"\"\"\n Functions very similar to ast2000tools.landing_sqeuence, but is plottable\n \"\"\"\n\n def __init__(self, r0, v0, system, mission):\n self.r = r0.reshape((1, 3)).T\n self.v = v0\n self.t = 0\n\n self.sys = system\n self.mis = mission\n\n self.R = self.sys.radii[self.mis.dest] * 1000\n self.M = self.sys.masses[self.mis.dest] * const.m_sun\n self.m = self.sys.mission.lander_mass\n self.rho_at_0 = self.sys.atmospheric_densities[self.mis.dest]\n\n self.parachute_deployed = False\n self.parachute_area = (\n 2 * const.G * self.M * self.m / (self.rho_at_0 * (3 * self.R) ** 2)\n )\n\n def deploy(self):\n self.parachute_deployed = True\n\n def free_fall(self, T, dt=1e-3):\n def F_d(r, v):\n # Dragforce, used atmospheric density from atmosphere.py\n C_d = 1\n if self.parachute_deployed:\n A = self.parachute_area\n else:\n A = self.sys.mission.lander_area\n omega = 2 * np.pi / (self.sys.rotational_periods[self.mis.dest] * const.day)\n w = omega * 
np.asarray([-r[1], r[0], 0]) # atmospheric velocity\n v_d = v - w\n dens = density(np.linalg.norm(r) - self.R)\n return dens / 2 * C_d * A * np.linalg.norm(v_d) * (-v_d)\n\n def gravity(r):\n return -r * const.G * self.M * np.linalg.norm(r) ** -3\n\n def rddot(r, v):\n if np.linalg.norm(r) <= self.R:\n return np.zeros(3)\n else:\n a = gravity(r) + F_d(r, v) / self.m\n return a\n\n def at_surface(t, u):\n r = u[:3]\n R = np.linalg.norm(r)\n return R - self.R\n\n at_surface.terminal = True # when lander is close to ground, integrator stops.\n\n def rhs(t, u):\n r = u[:3]\n v = u[3:]\n dr = v\n dv = rddot(r, v)\n return np.concatenate((dr, dv))\n\n nT = int(T / dt)\n\n t = np.linspace(self.t, self.t + T, nT, endpoint=False)\n r0 = self.r[:, -1]\n v0 = self.v\n\n u0 = np.concatenate((r0, v0))\n faller = solve_ivp(\n rhs, (t[0], t[-1]), u0, t_eval=t, events=at_surface, atol=1e-6, rtol=1e-6\n )\n\n falled = faller.y\n\n r, v = np.split(falled, 2)\n\n self.r = np.concatenate((self.r, r), axis=1)\n self.v = v[:, -1]\n self.t = t[-1]\n\n def boost(self, v):\n plt.scatter(self.r[0, -1], self.r[1, -1])\n self.v += v\n\n def orient(self):\n print(f\"Time: {self.t}\")\n print(f\"Position: {self.r[:, -1]}\")\n print(f\"Velocity: {self.v}\")\n return self.t, self.r[:, -1], self.v\n\n def plot(self):\n h = np.linspace(0, 2 * np.pi, 1000)\n x = self.R * np.cos(h)\n y = self.R * np.sin(h)\n plt.plot(x, y, color=\"g\")\n plt.fill(x, y, color=\"g\")\n\n cut = 100\n x, y, z = self.r[:, ::cut]\n plt.plot(x, y)\n plt.axis(\"equal\")\n plt.title(\"Entering very low orbit\")\n plt.xlabel(\"Distance (m)\")\n plt.ylabel(\"Distance (m)\")\n plt.text(\n 0.6,\n 0.7,\n \"Vogsphere\",\n size=20,\n ha=\"center\",\n va=\"center\",\n bbox=dict(boxstyle=\"round\", ec=(1.0, 0.5, 0.5), fc=(1.0, 0.8, 0.8),),\n )\n plt.show()\n\n time = np.linspace(0, self.t, len(x))\n height = np.linalg.norm(self.r[:, ::cut], axis=0) - self.R\n plt.plot(time, height)\n plt.show()\n\n\ndef stabilize_orbit(r0, v0, system, dest):\n r = np.linalg.norm(r0)\n t_tang_normed = np.array([-r0[1], r0[0], r0[2]]) / r\n\n vpm = np.sqrt(const.G * system.masses[dest] * const.m_sun / r) * t_tang_normed\n\n return vpm - v0\n\n\ndef find_landing_site(r, t, system, dest):\n R = np.linalg.norm(r)\n omega = 2 * np.pi / (system.rotational_periods[dest] * const.day)\n phi0 = np.arctan(r[1] / r[0])\n phi = phi0 + omega * t\n rx = R * np.cos(phi)\n ry = R * np.sin(phi)\n return np.asarray([rx, ry, 0]), phi * 180 / np.pi\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Initialize everything from scratch\n make system and mission\n launch and verify\n use shortcut to record destination\n begin landings\n stabilize orbit\n \"\"\"\n seed = 76117\n path = \"./../verification_data\"\n system = SolarSys(seed, path, False, True)\n mission = SpaceMission(seed, path, False, True)\n system.mission = mission\n\n launch_time = 0.75\n launch_site = 0\n\n years = launch_time + 1\n dt_pr_yr = 1e-5\n destination = 1\n r_p = system.radii[destination] * 1000\n\n system.differential_orbits(years, dt_pr_yr)\n\n Volcano, Epstein = launch.do_launch(Rocket=Rocket, verb=False)\n launch.change_reference(mission, system, Volcano, Epstein, launch_site, launch_time)\n mission.verify_manual_orientation(*navigate(system, mission, path))\n\n Volcano.begin_interplanetary_journey(\n system, mission, destination=destination, verbose=False\n )\n\n # We use shortcut to record destination to begin landing\n time = 0.11598196795767118\n shortcut = SMS(mission, [97905])\n 
shortcut.place_spacecraft_in_unstable_orbit(time, destination)\n\n lander = mission.begin_landing_sequence()\n t0, r0, v0 = lander.orient()\n boost = stabilize_orbit(r0, v0, system, destination)\n lander.boost(boost)\n t0, r0, v0 = lander.orient()\n\n \"\"\"\n Landing instance is our landing sequence.\n Very similar to interplanetary travel, but with air resistance\n We don't really use it in this code, but it is plottable,\n so we used it to see where we were when landing.\n methods boost, free_fall and orient are practically the same as for\n landing_sequence from ast2000tools.space_mission\n \"\"\"\n landing = Landing(r0, v0, system, Volcano)\n\n lander.adjust_parachute_area(landing.parachute_area)\n\n t, r, v = lander.orient()\n lander.boost(-v * 0.32)\n landing.boost(-v * 0.32)\n lander.fall(10390)\n landing.free_fall(10390)\n\n t_low_orbit = 5437.8\n t, r, v = lander.orient()\n stabilizer = stabilize_orbit(r, v, system, destination)\n lander.boost(stabilizer)\n landing.boost(stabilizer)\n t, r, v = lander.orient()\n print(\n \"\\n\\nLanding site is at: %.4f deg\\n\\n\"\n % (find_landing_site(r, t_low_orbit + 5600 + 85.9, system, destination)[1])\n )\n lander.look_in_direction_of_planet()\n lander.take_picture(filename=\"pic1.xml\")\n lander.fall(t_low_orbit)\n landing.free_fall(t_low_orbit)\n landing.plot()\n\n t, r, v = lander.orient()\n lander.launch_lander(-v * 0.2)\n lander.deploy_parachute()\n lander.fall(5600)\n lander.start_video()\n lander.fall(80)\n\n t, r, v = lander.orient()\n omega = 2 * np.pi / (system.rotational_periods[destination] * const.day)\n r_surf = r / np.linalg.norm(r) * r_p\n v_surf = omega * np.asarray([-r_surf[1], r_surf[0], 0])\n v_rel = np.linalg.norm(v - v_surf)\n g = const.G * system.masses[destination] * const.m_sun / r_p ** 2\n if v_rel > 10:\n h = np.linalg.norm(r) - r_p\n E = (\n 1 / 2 * mission.lander_mass * (v_rel ** 2)\n + mission.lander_mass * g * h * 0.75\n )\n lander.adjust_landing_thruster(force=E / h)\n lander.activate_landing_thruster()\n\n lander.fall(100)\n lander.finish_video()\n", "sub_path": "7_Landing/landing.py", "file_name": "landing.py", "file_ext": "py", "file_size_in_byte": 8436, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "sys.path.append", "line_number": 13, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 13, "usage_type": "call"}, {"api_name": "os.path", "line_number": 13, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 14, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 14, "usage_type": "call"}, {"api_name": "os.path", "line_number": 14, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 15, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 15, "usage_type": "call"}, {"api_name": "os.path", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 16, "usage_type": "call"}, {"api_name": "os.path", "line_number": 16, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 17, "usage_type": "call"}, {"api_name": 
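# The free_fall method above stops the integrator with a terminal event once
# the lander reaches the surface. A minimal standalone sketch of that scipy
# pattern, a 1-D drop with made-up numbers and no drag:
from scipy.integrate import solve_ivp

def rhs(t, u):
    h, v = u
    return [v, -9.81]              # constant gravity

def hit_ground(t, u):
    return u[0]                    # zero when the height reaches 0

hit_ground.terminal = True         # stop the solver at the event
hit_ground.direction = -1          # trigger only while falling

sol = solve_ivp(rhs, (0.0, 100.0), [100.0, 0.0], events=hit_ground)
print(sol.t_events[0])             # time of impact, about 4.5 s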
"sys.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 17, "usage_type": "call"}, {"api_name": "os.path", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 18, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.rc", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "ast2000tools.constants.m_sun", "line_number": 51, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 51, "usage_type": "name"}, {"api_name": "ast2000tools.constants.G", "line_number": 57, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 57, "usage_type": "name"}, {"api_name": "numpy.pi", "line_number": 71, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants.day", "line_number": 71, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 71, "usage_type": "name"}, {"api_name": "numpy.asarray", "line_number": 72, "usage_type": "call"}, {"api_name": "atmosphere.density", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 74, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 74, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 75, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 75, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants.G", "line_number": 78, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 78, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 78, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 81, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 81, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 89, "usage_type": "attribute"}, {"api_name": "numpy.concatenate", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.linspace", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 107, "usage_type": "call"}, {"api_name": "scipy.integrate.solve_ivp", "line_number": 108, "usage_type": "call"}, {"api_name": "numpy.split", "line_number": 114, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 121, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 121, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 131, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 131, "usage_type": "attribute"}, {"api_name": "numpy.cos", "line_number": 132, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 133, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 134, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 134, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.fill", "line_number": 135, "usage_type": "call"}, {"api_name": 
"matplotlib.pyplot", "line_number": 135, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 140, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 143, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 143, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.text", "line_number": 144, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 144, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 153, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 155, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 156, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 156, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 157, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 157, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 158, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 162, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 162, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 163, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 165, "usage_type": "call"}, {"api_name": "ast2000tools.constants.G", "line_number": 165, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 165, "usage_type": "name"}, {"api_name": "ast2000tools.constants.m_sun", "line_number": 165, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 171, "usage_type": "attribute"}, {"api_name": "numpy.pi", "line_number": 172, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants.day", "line_number": 172, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 172, "usage_type": "name"}, {"api_name": "numpy.arctan", "line_number": 173, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 175, "usage_type": "call"}, {"api_name": "numpy.sin", "line_number": 176, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 177, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 177, "usage_type": "attribute"}, {"api_name": "orbits.SolarSys", "line_number": 191, "usage_type": "call"}, {"api_name": "ast2000tools.space_mission.SpaceMission", "line_number": 192, "usage_type": "call"}, {"api_name": "launch.do_launch", "line_number": 205, "usage_type": "call"}, {"api_name": "journey.Rocket", "line_number": 205, "usage_type": "name"}, {"api_name": "launch.change_reference", "line_number": 206, "usage_type": "call"}, {"api_name": "navigate.navigate", "line_number": 207, "usage_type": "call"}, {"api_name": 
"ast2000tools.shortcuts.SpaceMissionShortcuts", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 266, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants.day", "line_number": 266, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 266, "usage_type": "name"}, {"api_name": "numpy.linalg.norm", "line_number": 267, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 267, "usage_type": "attribute"}, {"api_name": "numpy.asarray", "line_number": 268, "usage_type": "call"}, {"api_name": "numpy.linalg.norm", "line_number": 269, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 269, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants.G", "line_number": 270, "usage_type": "attribute"}, {"api_name": "ast2000tools.constants", "line_number": 270, "usage_type": "name"}, {"api_name": "ast2000tools.constants.m_sun", "line_number": 270, "usage_type": "attribute"}, {"api_name": "numpy.linalg.norm", "line_number": 272, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 272, "usage_type": "attribute"}]} +{"seq_id": "241995971", "text": "#!/bin/bash python3.7\n'''\nhttps://leetcode.com/problems/next-greater-element-i/\n'''\nfrom typing import List\n\n\nclass Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n nums2_set = set(nums2)\n length = len(nums2)\n result = [None] * len(nums1)\n for index, num in enumerate(nums1):\n greater = -1\n if num in nums2_set:\n i = nums2.index(num) + 1\n while i < length:\n if nums2[i] > num:\n greater = nums2[i]\n break\n else:\n i += 1\n result[index] = greater\n return result\n\n\nassert Solution().nextGreaterElement([4, 1, 2],\n [1, 3, 4, 2]) == [-1, 3, -1]\n", "sub_path": "python/leetcode/496_next_greater_element_i.py", "file_name": "496_next_greater_element_i.py", "file_ext": "py", "file_size_in_byte": 824, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "typing.List", "line_number": 9, "usage_type": "name"}]} +{"seq_id": "497861466", "text": "from __future__ import print_function\n\nimport argparse\nimport keras\nimport signal\nfrom utils.utils import *\n\n# python 2 and python 3\ntry:\n input = raw_input\nexcept:\n pass\n \n# deal with ctrl+c\nclass InterruptException(Exception):\n pass\n \ndef interrupt(signum, frame):\n raise InterruptException('')\n \n \ndef main(): \n loss_list = [] \n '''\n Parse arguments\n '''\n parser = argparse.ArgumentParser()\n parser.add_argument('-n', '--new', action='store_true', help='if true, erase the local file and train a new model')\n parser.add_argument('-p', '--predict', action='store_true', help='if true, skip the training step and make predictions only')\n parser.add_argument('-a', '--all', action='store_true', help='if true, feed all related parameters into the model')\n parser.add_argument('-g1', '--graph-prediction', action='store_true', help='if true, generate the prediction graph')\n parser.add_argument('-g2', '--graph-scatter', action='store_true', help='if true, generate the scatter graph and the predictions in the output folder')\n args = parser.parse_args()\n print(\"New model:\", args.new)\n print(\"Predictions only:\", args.predict)\n if (args.graph_prediction and args.graph_scatter):\n print(\"Cannot generate two graphs at the same time.\")\n return\n if (args.new==False):\n print('Warning: If you change the configs of the structure, you may use the \"--new\" or \"-n\" 
option.')\n ''' \n Configurations:\n - t_x means the number of data for predicting the next one (similar to num_of_units in the previous version)\n - out_d means the output dimension of the rnn units\n - num_of_dense1 and num_of_dense2 are the numbers of cells in two dense layers\n - num_to_pred represents the number of hours ahead to be predicted.\n \n - epoch means the times of training(using all data)\n - batch size means the batch of data to feed during per training time.\n \n Updated at 08/14/2018:\n - Erase the default config, and it will now throw exceptions if there is no valid configurations. \n - The original version may lead to misuse\n '''\n config = read_configs(\"config\")\n if config == -1:\n return\n \n ''' \n Load and Preprocess Datas\n \n Updated at 08/14/2018:\n - Change the order of parameters stored in the matrix\n - Change the way to pick consecutive data\n \n '''\n data_1 = scipy.io.loadmat(config[\"data_file\"])\n x_mat = np.vstack((data_1[\"hour\"], data_1[\"month\"], data_1[\"relative_humidity\"], data_1[\"air_temperature\"], data_1[\"total_cloud_fraction\"], data_1[\"toa_irradiance\"], data_1[\"day\"], data_1[\"year\"]))\n y_mat = np.array(data_1[\"surface_irradiance\"])\n x_and_y = np.hstack((y_mat.T, x_mat.T))\n if args.all:\n paras = x_and_y.shape[1]-2 # factors used for prediction\n else:\n paras = 1\n config['paras'] = paras\n t_x = config[\"t_x\"]\n num_to_pred = config[\"num_to_pred\"]\n consecutive_data_list = to_consecutive_list(x_and_y, t_x, day=-2, hour=1, label=0)\n training_data = np.array(consecutive_data_list).astype(int)\n training_data = np.reshape(training_data[:, :, :],[len(consecutive_data_list),t_x, -1])\n raw_data = copy.deepcopy(training_data)\n training_data = raw_data[0:int(0.8*len(consecutive_data_list)),:,:]\n test_data = raw_data[int(0.8*len(consecutive_data_list)):,:,:]\n print(raw_data.shape, training_data.shape, test_data.shape)\n \n '''\n Build the model\n \n Updated at 08/16/2018:\n - Now able to predict N-hour ahead solar irradiance by changing the \"num_to_pred\" config.\n '''\n if args.new:\n model = weather_forecast_model(config)\n else:\n with open('model-files/'+config[\"file_model\"], 'r') as f:\n model = model_from_json(f.read())\n if not args.new:\n model.load_weights('model-files/' + config['file_weights'])\n print(\"Using pre-trained model.\")\n model.compile(loss='mse',optimizer=keras.optimizers.Adam(lr=config[\"lr\"]))\n print(\"\")\n model.summary()\n \n ''' print loss '''\n if not args.predict:\n loss_train = model.evaluate(x=training_data[:, :-num_to_pred, :paras], y=training_data[:, -num_to_pred:, 0],batch_size=config['batch-size'])\n print('The loss of the training set is', loss_train)\n loss_test = model.evaluate(x=test_data[:, :-num_to_pred, :paras], y=test_data[:, -num_to_pred:, 0],batch_size=config['batch-size'])\n print('The loss of the test set is', loss_test)\n loss_list.append([loss_train, loss_test])\n \n ''' train '''\n if not args.predict:\n print(\"\")\n try:\n signal.signal(signal.SIGINT, interrupt)\n \n for i in range(int(config[\"epoch\"]/5)):\n \n model.fit(training_data[:, :-num_to_pred, :paras], training_data[:, -num_to_pred:, 0], epochs=5, batch_size=config['batch-size'])\n loss_train = model.evaluate(x=training_data[:, :-num_to_pred, :paras], y=training_data[:, -num_to_pred:, 0], batch_size=config['batch-size'])\n loss_test = model.evaluate(x=test_data[:, :-num_to_pred, :paras], y=test_data[:, -num_to_pred:, 0], batch_size=config['batch-size'])\n print('The loss of the training set is', 
loss_train)\n print('The loss of the test set is', loss_test)\n loss_list.append([loss_train, loss_test])\n except InterruptException:\n if input(\"\\nInput y to save the model, otherwise abandon: \")=='y':\n pass\n else:\n return\n \n ## print loss ##\n \n loss_train = model.evaluate(x=training_data[:, :-num_to_pred, :paras], y=training_data[:, -num_to_pred:, 0],batch_size=config['batch-size'])\n print('The loss of the training set is', loss_train)\n loss_test = model.evaluate(x=test_data[:, :-num_to_pred, :paras], y=test_data[:, -num_to_pred:, 0],batch_size=config['batch-size'])\n print('The loss of the test set is', loss_test)\n with open('loss-list.txt', 'a') as f:\n for tup in loss_list:\n f.write(str(tup[0]) + ', ' + str(tup[1]) + '\\n')\n with open('model-files/'+config[\"file_model\"], 'w') as f:\n f.write(model.to_json())\n print(\"Successfully save models to local\")\n model.save_weights('model-files/' + config['file_weights'])\n print(\"Successfully save weights to local\")\n \n '''\n Generate graphs\n '''\n if args.graph_prediction:\n prediction(model, training_data, config)\n if args.graph_scatter:\n scatter(model, test_data, config)\n \n\nif __name__ == \"__main__\":\n \n main()\n print(\"##########################END OF PROGRAM##########################\")\n print(\"\")\n\n\n", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 6750, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "keras.optimizers.Adam", "line_number": 100, "usage_type": "call"}, {"api_name": "keras.optimizers", "line_number": 100, "usage_type": "attribute"}, {"api_name": "signal.signal", "line_number": 116, "usage_type": "call"}, {"api_name": "signal.SIGINT", "line_number": 116, "usage_type": "attribute"}]} +{"seq_id": "142008240", "text": "# -*- coding: utf-8 -*-\n#from __future__ import unicode_literals\nimport io\nimport csv\nimport os\nimport time\nimport sys\n#import urllib2\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\n#from grs import RealtimeWeight\nimport stock_comm\nimport stock_comm as cmm \nimport requests\nimport inspect\nfrom inspect import currentframe, getframeinfo\nimport pandas as pd\nfrom bs4 import BeautifulSoup \nimport re\n#import pyecharts\n#from pyecharts import Kline\n#from pyecharts import Candlestick\n#import webbrowser\ndef lno():\n cf = currentframe()\n filename = getframeinfo(cf).filename\n return '%s-L(%d)'%(os.path.basename(filename),inspect.currentframe().f_back.f_lineno)\n\ndef check_dst_folder(dstpath):\n if not os.path.isdir(dstpath):\n os.makedirs(dstpath) \n\n \n \ndef down_taiwan_dollar(startdate,enddate):\n\n dst_folder='csv/taiwan_dollar'\n filename='csv/taiwan_dollar/tmp.csv'\n out_file='csv/taiwan_dollar/taiwan_dollar_data.csv'\n check_dst_folder(dst_folder)\n startdate_str='%d/%02d/%02d'%(int(startdate.year), int(startdate.month),int(startdate.day))\n enddate_str='%d/%02d/%02d'%(int(enddate.year), int(enddate.month),int(enddate.day))\n url = 'http://www.taifex.com.tw/cht/3/dailyFXRateDown'\n query_params = {\n 'queryStartDate': startdate_str,\n 'queryEndDate': enddate_str\n }\n\n page = requests.post(url, data=query_params)\n\n if not page.ok:\n print(lno(),\"Can not get data at {}\".format(url))\n return \n with open(filename, 'wb') as file:\n # A chunk of 128 bytes\n for chunk in page:\n file.write(chunk)\n df = 
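# A stripped-down sketch of the Ctrl-C handling used in the training script
# above: SIGINT is converted into an exception so a long-running loop can be
# interrupted cleanly and the partial result kept or discarded.
import signal

class InterruptException(Exception):
    pass

def interrupt(signum, frame):
    raise InterruptException()

signal.signal(signal.SIGINT, interrupt)

try:
    for epoch in range(5):
        pass                       # stand-in for one training epoch
    print("training finished")
except InterruptException:
    print("interrupted; decide here whether to save the partial model")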
pd.read_csv(filename,encoding = 'big5')\n df.dropna(axis=1,how='all',inplace=True)\n df.dropna(inplace=True)\n #print (lno(),df)\n if os.path.exists(out_file): \n df_s = pd.read_csv(out_file,encoding = 'utf-8')\n df_s.dropna(axis=1,how='all',inplace=True)\n df_s.dropna(inplace=True)\n \n \n df_s=df_s.append(df,ignore_index=True)\n \n df_s.drop_duplicates(subset=['日期'],keep='first',inplace=True)\n df_s=df_s.sort_values(by=['日期'], ascending=False)\n \n df_s.to_csv(out_file,encoding='utf-8', index=False)\n \n else :\n df.to_csv(out_file,encoding='utf-8', index=False)\n \n \ndef get_taiwan_dollor(date):\n out_file='csv/taiwan_dollar/taiwan_dollar_data.csv'\n #print(lno(),date)\n if os.path.exists(out_file): \n \n date_str='%d/%02d/%02d'%(int(date.year), int(date.month),int(date.day))\n df_s = pd.read_csv(out_file,encoding = 'utf-8')\n df_s.dropna(axis=1,how='all',inplace=True)\n df_s.dropna(inplace=True)\n #print(lno(),df_s[(df_s['date'] == date_str)].values.tolist())\n df=df_s[(df_s['日期'] == date_str)]\n if len(df)==1:\n df.reset_index(inplace=True)\n #total=float(df.iat[0,'外資total'].strip().replace(',', ''))\n try:\n total=float(df.at[0,'美元/新台幣'])\n except:\n print (lno(),df.at[0,'美元/新台幣']) \n total=0 \n \n #print(lno(),total_int)\n return total\n return 0\n \n else :\n return 0 \n \ndef down_stock_class1(startdate,enddate):\n\n dst_folder='csv/taiwan_dollar'\n filename='csv/taiwan_dollar/tmp.csv'\n out_file='csv/taiwan_dollar/taiwan_dollar_data.csv'\n check_dst_folder(dst_folder)\n startdate_str='%d/%02d/%02d'%(int(startdate.year), int(startdate.month),int(startdate.day))\n enddate_str='%d/%02d/%02d'%(int(enddate.year), int(enddate.month),int(enddate.day))\n url = 'https://www.moneydj.com/z/zg/zgd/zgd_E_E.djhtm'\n\ndef get_group(date):\n out_file='csv/group/group.csv'\n #print(lno(),date)\n if os.path.exists(out_file): \n \n date_str='%d/%02d/%02d'%(int(date.year), int(date.month),int(date.day))\n df_s = pd.read_csv(out_file,encoding = 'utf-8')\n df_s.dropna(axis=1,how='all',inplace=True)\n df_s.dropna(inplace=True)\n #print(lno(),df_s)\n for i in range(0,len(df_s)):\n #print(lno(),df_s.at[i,'groud_name'])\n stock_list=df_s.at[i,'stocks'].split(',')\n \n print(lno(),df_s.at[i,'groud_name'],len(stock_list),stock_list)\n for j in stock_list:\n print(lno(),j)\n break \n return 0\n \n else :\n return 0 \n \nif __name__ == '__main__':\n #print (lno(),sys.path[0])\n #get_cur_twii_list(datetime.today())\n if len(sys.argv)==1:\n #startdate=stock_comm.get_date()\n #down_taiwan_dollar(startdate,startdate)\n \n ##抓取集團名單\n ##再抓取集團股票名單,save group_stock.csv\n url = 'https://www.moneydj.com/z/zg/zgd/zgd_E_E.djhtm'\n r = requests.get(url, allow_redirects=True)\n if r.ok:\n \n soup = BeautifulSoup(r.content.decode('cp950'), 'lxml')\n res=[]\n for option in soup.find_all('option'):\n tmp=[]\n if '集團' in option.text:\n print ('value: {}, text: {}'.format(option['value'], option.text))\n #https://www.moneydj.com/z/zg/zgd_EG00041_1.djhtm\n url_stock='https://www.moneydj.com/z/zg/zgd_{}_1.djhtm'.format(option['value'])\n r_stock = requests.get(url_stock, allow_redirects=True)\n if not r_stock.ok:\n print(lno(),\"get fail\")\n continue\n soup_stock = BeautifulSoup(r_stock.content.decode('cp950'), 'lxml') \n #script = soup_stock.find(\"script\", text=pattern)\n stocks=''\n ids = soup_stock.find_all(id=\"oAddCheckbox\")\n print(lno(),len(ids))\n for id in ids:\n #print(lno(),id)\n stock=[]\n #t=id.get_text().strip().replace('(',',').replace(')',',').split(',')\n #print(lno(),id.get_text())\n 
t=re.findall(r'[(](.*?)[)]', id.get_text())\n if len(t)==0:\n continue\n print(lno(),len(t),t)\n tt=t[0].split(',')\n #print(lno(),len(tt),tt)\n stock_id=tt[0].strip().replace('AS','').replace('\\\"','').replace('\\'','')\n stock_name=tt[1].strip().replace('\\\"','').replace('\\'','')\n stock.append(stock_id)\n #stock.append(stock_name)\n print(lno(),stock_id,stock_name)\n if stocks=='':\n stocks='{}'.format(stock_id)\n else:\n stocks='{},{}'.format(stocks,stock_id)\n tmp.append(option.text) #集團名稱\n tmp.append(option['value']) #網頁代號\n tmp.append(stocks)\n res.append(tmp)\n \n print(lno(),res)\n labels = ['groud_name','group_id', 'stocks']\n df = pd.DataFrame.from_records(res, columns=labels)\n if not os.path.isdir('csv/group'):\n os.makedirs('csv/group') \n out_file='csv/group/group.csv'\n df.to_csv(out_file,encoding='utf-8', index=False)\n \n \n #open('stock_class_v1.html', 'wb').write(r.content)\n #data=pd.read_html(url, flavor='bs4', header=0, encoding='UTF8')\n #data=pd.read_html(url)\n #print(lno(),r.content)\n elif sys.argv[1]=='-d' :\n #print (lno(),len(sys.argv))\n if len(sys.argv)==4 :\n # 從今日往前抓一個月\n startdate=datetime.strptime(sys.argv[2],'%Y%m%d')\n enddate=datetime.strptime(sys.argv[3],'%Y%m%d')\n down_taiwan_dollar(startdate,enddate) \n else :\n print (lno(),'func -p startdata enddate') \n elif sys.argv[1]=='-g' :\n if len(sys.argv)==3 :\n #參數2:開始日期 \n startdate=datetime.strptime(sys.argv[2],'%Y%m%d')\n dollar =get_taiwan_dollor(startdate)\n #df['外資buy']=df['外資buy'].astype('float64') \n \n print(dollar)\n \n\n else :\n print (lno(),'func -g date') \n \n \n \n \n else: \n objdatetime=datetime.strptime(sys.argv[1],'%Y%m%d')\n get_group(objdatetime)\n \n \n ", "sub_path": "stock_class.py", "file_name": "stock_class.py", "file_ext": "py", "file_size_in_byte": 8704, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "inspect.currentframe", "line_number": 25, "usage_type": "call"}, {"api_name": "inspect.getframeinfo", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "inspect.currentframe", "line_number": 27, "usage_type": "call"}, {"api_name": "os.path.isdir", "line_number": 30, "usage_type": "call"}, {"api_name": "os.path", "line_number": 30, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 31, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 58, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 62, "usage_type": "call"}, {"api_name": "os.path", "line_number": 62, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 63, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 82, "usage_type": "call"}, {"api_name": "os.path", "line_number": 82, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 85, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 119, "usage_type": "call"}, {"api_name": "os.path", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 122, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 142, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 149, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 152, "usage_type": "call"}, {"api_name": 
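# down_taiwan_dollar above grows one CSV by appending newly scraped rows and
# dropping duplicate dates. The same idea in isolation, using pd.concat
# (DataFrame.append is deprecated in recent pandas) and an English stand-in
# for the Chinese date column of the scraped file:
import pandas as pd

old = pd.DataFrame({"date": ["2021/01/04", "2021/01/05"], "rate": [28.4, 28.5]})
new = pd.DataFrame({"date": ["2021/01/05", "2021/01/06"], "rate": [28.5, 28.6]})

merged = pd.concat([old, new], ignore_index=True)
merged.drop_duplicates(subset=["date"], keep="first", inplace=True)
merged = merged.sort_values(by=["date"], ascending=False)
print(merged)                      # one row per date, newest first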
"requests.get", "line_number": 160, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 164, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 174, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_records", "line_number": 196, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 196, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 197, "usage_type": "call"}, {"api_name": "os.path", "line_number": 197, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 198, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 207, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 209, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 211, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 211, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 212, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 212, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 212, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 216, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 217, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 219, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 219, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 219, "usage_type": "attribute"}, {"api_name": "datetime.datetime.strptime", "line_number": 233, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 233, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 233, "usage_type": "attribute"}]} +{"seq_id": "161430064", "text": "from six.moves import queue\nimport pyaudio\nfrom google.cloud import speech\nfrom google.cloud.speech import enums\nfrom google.cloud.speech import types\n\nstream_close = False # ストリーミング終了時にTrueとなる\n\nSTREAMING_LIMIT = 240000 \nSAMPLE_RATE = 16000\nCHUNK_SIZE = int(SAMPLE_RATE / 10) \n\n\nclass ResumableMicrophoneStream:\n\n def __init__(self, rate, chunk_size):\n \n self._rate = rate\n self.chunk_size = chunk_size\n self._num_channels = 1\n \n # 取得した音声を格納するキュー\n self._buff = queue.Queue() \n \n # マイクから音声を入力するインスタンス\n self._audio_interface = pyaudio.PyAudio()\n self._audio_stream = self._audio_interface.open(\n format=pyaudio.paInt16,\n channels=self._num_channels,\n rate=self._rate,\n input=True,\n frames_per_buffer=self.chunk_size,\n stream_callback=self._fill_buffer,\n )\n\n \n # with文実行時に呼ばれる\n def __enter__(self):\n\n global stream_close\n stream_close = False\n return self\n\n # with文終了時に呼ばれる\n def __exit__(self, type, value, traceback):\n\n self._audio_stream.stop_stream()\n self._audio_stream.close()\n self._buff.put(None)\n self._audio_interface.terminate()\n global stream_close\n stream_close = True\n\n \n def _fill_buffer(self, in_data, *args, **kwargs):\n\n # マイクから入力した音声をキューに格納する\n self._buff.put(in_data)\n return None, pyaudio.paContinue\n\n \n def generator(self):\n\n global stream_close\n while not stream_close:\n data = []\n\n chunk = self._buff.get()\n \n if chunk is None:\n return\n\n data.append(chunk)\n \n # キューが空になるまでdataリストに追加する\n while True:\n try:\n chunk = self._buff.get(block=False)\n\n if chunk is None:\n return\n data.append(chunk)\n\n except queue.Empty:\n break\n\n yield b''.join(data)\n\n\n## 音声のテキスト化を表示する関数\ndef 
listen_print_loop(responses, stream):\n \n global stream_close\n\n for response in responses:\n\n if not response.results:\n continue\n\n result = response.results[0]\n\n if not result.alternatives:\n continue\n\n transcript = result.alternatives[0].transcript\n\n # 文末と判定したら区切る\n if result.is_final:\n print(transcript)\n else:\n print(' ', transcript)\n\n\n # 『エンド』を言うと終了する\n if transcript == 'エンド':\n stream_close = True\n \n \n## Speech to Textを実行する関数 \ndef excecute_speech_to_text_streaming():\n\n print('Start Speech to Text Streaming')\n\n client = speech.SpeechClient()\n config = speech.types.RecognitionConfig(\n encoding=speech.enums.RecognitionConfig.AudioEncoding.LINEAR16,\n sample_rate_hertz=SAMPLE_RATE,\n language_code='ja-JP',\n max_alternatives=1\n )\n streaming_config = speech.types.StreamingRecognitionConfig(\n config=config,\n interim_results=True\n )\n\n mic_manager = ResumableMicrophoneStream(SAMPLE_RATE, CHUNK_SIZE)\n with mic_manager as stream:\n \n # マイクから入力した音声の取得\n audio_generator = stream.generator()\n\n requests = (\n speech.types.StreamingRecognizeRequest(audio_content=content) for content in audio_generator\n )\n\n # Google Speech to Text APIを使って音声をテキストに変換\n responses = client.streaming_recognize(\n streaming_config,\n requests\n )\n \n # テキスト変換結果を表示する\n listen_print_loop(responses, stream)\n\n print('End Speech to Text Streaming')\n\n\nif __name__ == '__main__':\n excecute_speech_to_text_streaming()\n\n", "sub_path": "practice_streaming_speech_to_text.py", "file_name": "practice_streaming_speech_to_text.py", "file_ext": "py", "file_size_in_byte": 4228, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "six.moves.queue.Queue", "line_number": 23, "usage_type": "call"}, {"api_name": "six.moves.queue", "line_number": 23, "usage_type": "name"}, {"api_name": "pyaudio.PyAudio", "line_number": 26, "usage_type": "call"}, {"api_name": "pyaudio.paInt16", "line_number": 28, "usage_type": "attribute"}, {"api_name": "pyaudio.paContinue", "line_number": 59, "usage_type": "attribute"}, {"api_name": "six.moves.queue.Empty", "line_number": 84, "usage_type": "attribute"}, {"api_name": "six.moves.queue", "line_number": 84, "usage_type": "name"}, {"api_name": "google.cloud.speech.SpeechClient", "line_number": 124, "usage_type": "call"}, {"api_name": "google.cloud.speech", "line_number": 124, "usage_type": "name"}, {"api_name": "google.cloud.speech.types.RecognitionConfig", "line_number": 125, "usage_type": "call"}, {"api_name": "google.cloud.speech.types", "line_number": 125, "usage_type": "attribute"}, {"api_name": "google.cloud.speech", "line_number": 125, "usage_type": "name"}, {"api_name": "google.cloud.speech.enums", "line_number": 126, "usage_type": "attribute"}, {"api_name": "google.cloud.speech", "line_number": 126, "usage_type": "name"}, {"api_name": "google.cloud.speech.types.StreamingRecognitionConfig", "line_number": 131, "usage_type": "call"}, {"api_name": "google.cloud.speech.types", "line_number": 131, "usage_type": "attribute"}, {"api_name": "google.cloud.speech", "line_number": 131, "usage_type": "name"}, {"api_name": "google.cloud.speech.types.StreamingRecognizeRequest", "line_number": 143, "usage_type": "call"}, {"api_name": "google.cloud.speech.types", "line_number": 143, "usage_type": "attribute"}, {"api_name": "google.cloud.speech", "line_number": 143, "usage_type": "name"}]} +{"seq_id": "617884179", "text": "from django.conf.urls import include, url\nfrom django.db import models\n\nfrom 
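# The generator above drains a queue.Queue into joined byte chunks: block for
# the first item, then sweep the rest non-blockingly, treating None as the
# shutdown sentinel. The same pattern in miniature, with the plain Python 3
# queue module instead of six.moves:
from queue import Queue, Empty

def drain(buff):
    while True:
        chunk = buff.get()             # block until at least one chunk
        if chunk is None:              # None signals shutdown
            return
        data = [chunk]
        while True:                    # sweep whatever else is queued
            try:
                chunk = buff.get(block=False)
                if chunk is None:
                    return
                data.append(chunk)
            except Empty:
                break
        yield b"".join(data)

q = Queue()
q.put(b"ab")
q.put(b"cd")
g = drain(q)
print(next(g))                         # b'abcd'
q.put(None)                            # sentinel ends the generator
print(list(g))                         # []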
rest_framework import serializers, status, viewsets\nfrom rest_framework.reverse import reverse\nfrom rest_framework.routers import DefaultRouter\nfrom rest_framework.test import APIRequestFactory, APITestCase\n\nfrom .urls import urlpatterns\n\nfactory = APIRequestFactory()\n\n\n# no namesapce: Model, serializer and viewset\n\n\nclass NoNamespaceModel(models.Model):\n pass\n\n\nclass NoNamespaceModelSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = NoNamespaceModel\n\n\nclass NoNamespaceModelViewSet(viewsets.ModelViewSet):\n queryset = NoNamespaceModel.objects.all()\n serializer_class = NoNamespaceModelSerializer\n\n\nno_namespace_router = DefaultRouter()\nno_namespace_router.register('no_ns_model', NoNamespaceModelViewSet)\n\n\n# namespace1: Model, serializer and viewset\n\n\nclass Namespace1Model(models.Model):\n # Reference to NoNamespaceModel\n fk_no_ns_model = models.ForeignKey(NoNamespaceModel)\n\n\nclass Namespace1ModelSerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Namespace1Model\n\n\nclass Namespace1ModelViewSet(viewsets.ModelViewSet):\n queryset = Namespace1Model.objects.all()\n serializer_class = Namespace1ModelSerializer\n\n\nnamespace1_router = DefaultRouter()\nnamespace1_router.register('ns_1_model', Namespace1ModelViewSet)\n\n\n# namespace2: Models, serializers and viewsets\n\n\nclass Namespace2Model1(models.Model):\n # Reference to Namespace1Model\n fk_ns_1_model = models.ForeignKey(Namespace1Model)\n\n\nclass Namespace2Model2(models.Model):\n # Reference to Namespace2Model1\n fk_ns_2_model_1 = models.ForeignKey(Namespace2Model1)\n\n\nclass Namespace2Model1Serializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Namespace2Model1\n\n\nclass Namespace2Model2Serializer(serializers.HyperlinkedModelSerializer):\n fk_ns_2_model_1 = Namespace2Model1Serializer(read_only=True)\n\n class Meta:\n model = Namespace2Model2\n\n\nclass Namespace2Model1ViewSet(viewsets.ModelViewSet):\n queryset = Namespace2Model1.objects.all()\n serializer_class = Namespace2Model1Serializer\n\n\nclass Namespace2Model2ViewSet(viewsets.ModelViewSet):\n queryset = Namespace2Model2.objects.all()\n serializer_class = Namespace2Model2Serializer\n\n\nnamespace2_router = DefaultRouter()\nnamespace2_router.register('ns_2_model_1', Namespace2Model1ViewSet)\nnamespace2_router.register('ns_2_model_2', Namespace2Model2ViewSet)\n\n\nurlpatterns += [\n url(r'^nonamespace/', include(no_namespace_router.urls)),\n url(r'^namespace1/', include(namespace1_router.urls, namespace='namespace1')),\n url(r'^namespace2/', include(namespace2_router.urls, namespace='namespace2')),\n]\n\n\nclass NamespaceTestCase(APITestCase):\n\n def setUp(self):\n self.request = factory.request()\n self.no_ns_item = NoNamespaceModel.objects.create()\n self.ns_1_item = Namespace1Model.objects.create(fk_no_ns_model=self.no_ns_item)\n self.ns_2_model_1_item = Namespace2Model1.objects.create(fk_ns_1_model=self.ns_1_item)\n self.ns_2_model_2_item = Namespace2Model2.objects.create(fk_ns_2_model_1=self.ns_2_model_1_item)\n self.url_no_ns_item = '/nonamespace/no_ns_model/{pk}/'.format(pk=self.no_ns_item.pk)\n self.url_ns_1_item = '/namespace1/ns_1_model/{pk}/'.format(pk=self.ns_1_item.pk)\n self.url_ns_2_model_1_item = '/namespace2/ns_2_model_1/{pk}/'.format(pk=self.ns_2_model_1_item.pk)\n self.url_ns_2_model_2_item = '/namespace2/ns_2_model_2/{pk}/'.format(pk=self.ns_2_model_2_item.pk)\n\n def test_reverse_with_namespace(self):\n # Namespace 1\n reverse_ns_1_item = 
reverse('namespace1:namespace1model-detail', args=[self.ns_1_item.pk])\n self.assertEquals(reverse_ns_1_item, self.url_ns_1_item)\n\n # Namespace 2 - Model 1\n reverse_ns_2_model_1_item = reverse('namespace2:namespace2model1-detail', args=[self.ns_2_model_1_item.pk])\n self.assertEquals(reverse_ns_2_model_1_item, self.url_ns_2_model_1_item)\n\n # Namespace 2 - Model 2\n reverse_ns_2_model_2_item = reverse('namespace2:namespace2model2-detail', args=[self.ns_2_model_2_item.pk])\n self.assertEquals(reverse_ns_2_model_2_item, self.url_ns_2_model_2_item)\n\n def test_hyperlinked_identity_field_with_no_namespace(self):\n response = self.client.get(self.url_ns_1_item)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals(response.data.get('url', None), self.request.build_absolute_uri(self.url_ns_1_item))\n\n # Test the hyperlink of the NoNamespaceModel FK\n fk_url = response.data.get('fk_no_ns_model', None)\n self.assertEquals(fk_url, self.request.build_absolute_uri(self.url_no_ns_item))\n\n def test_hyperlinked_identity_field_with_different_namespace(self):\n response = self.client.get(self.url_ns_2_model_1_item)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals(response.data.get('url', None), self.request.build_absolute_uri(self.url_ns_2_model_1_item))\n # Test the hyperlink of the NameSpace1Model FK\n self.assertEquals(response.data.get('fk_ns_1_model', None), self.request.build_absolute_uri(self.url_ns_1_item))\n\n def test_hyperlinked_identity_field_with_same_namespace(self):\n response = self.client.get(self.url_ns_2_model_2_item)\n self.assertEquals(response.status_code, status.HTTP_200_OK)\n self.assertEquals(response.data.get('url', None), self.request.build_absolute_uri(self.url_ns_2_model_2_item))\n response_item = response.data.get('fk_ns_2_model_1', {})\n # Test the hyperlink of the Namespace2Model1 FK\n self.assertEquals(response_item.get('url', None), self.request.build_absolute_uri(self.url_ns_2_model_1_item))\n # Test the hyperlink of the NameSpace1Model FK\n self.assertEquals(response_item.get('fk_ns_1_model', None), self.request.build_absolute_uri(self.url_ns_1_item))\n", "sub_path": "tests/test_namespaces.py", "file_name": "test_namespaces.py", "file_ext": "py", "file_size_in_byte": 6012, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "rest_framework.test.APIRequestFactory", "line_number": 11, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 21, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 21, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 26, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 26, "usage_type": "name"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 
43, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 43, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 48, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 48, "usage_type": "name"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 53, "usage_type": "call"}, {"api_name": "django.db.models.Model", "line_number": 60, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 60, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 65, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 65, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 67, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 70, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 70, "usage_type": "name"}, {"api_name": "rest_framework.serializers.HyperlinkedModelSerializer", "line_number": 75, "usage_type": "attribute"}, {"api_name": "rest_framework.serializers", "line_number": 75, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 82, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 82, "usage_type": "name"}, {"api_name": "rest_framework.viewsets.ModelViewSet", "line_number": 87, "usage_type": "attribute"}, {"api_name": "rest_framework.viewsets", "line_number": 87, "usage_type": "name"}, {"api_name": "rest_framework.routers.DefaultRouter", "line_number": 92, "usage_type": "call"}, {"api_name": "urls.urlpatterns", "line_number": 97, "usage_type": "name"}, {"api_name": "django.conf.urls.url", "line_number": 98, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 98, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 99, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 99, "usage_type": "call"}, {"api_name": "django.conf.urls.url", "line_number": 100, "usage_type": "call"}, {"api_name": "django.conf.urls.include", "line_number": 100, "usage_type": "call"}, {"api_name": "rest_framework.test.APITestCase", "line_number": 104, "usage_type": "name"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 119, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 123, "usage_type": "call"}, {"api_name": "rest_framework.reverse.reverse", "line_number": 127, "usage_type": "call"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 132, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 132, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 141, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 141, "usage_type": "name"}, {"api_name": "rest_framework.status.HTTP_200_OK", "line_number": 148, "usage_type": "attribute"}, {"api_name": "rest_framework.status", "line_number": 148, "usage_type": "name"}]} +{"seq_id": "489831361", "text": "from rest_framework.response import Response \nfrom .models import Event \nfrom .serializer import EventSerializer\nfrom rest_framework.decorators import 
api_view\n\n\n@api_view(['GET'])\ndef eventList(request):\n events = Event.objects.all()\n serializer = EventSerializer(events, many=True)\n return Response(serializer.data)\n\n@api_view(['GET'])\ndef eventDetail(request, pk):\n events = Event.objects.get(id=pk)\n serializer = EventSerializer(events, many=False)\n return Response(serializer.data)\n \n@api_view(['POST'])\ndef eventCreate(request):\n serializer = EventSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n\n@api_view(['POST'])\ndef eventUpdate(request, pk):\n event = Event.objects.get(id=pk)\n serializer = EventSerializer(instance=event, data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n\n@api_view(['DELETE'])\ndef eventDelete(request, pk):\n event = Event.objects.get(id=pk)\n event.delete()\n\n return Response('Deleted')\n\n\n", "sub_path": "events/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1096, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "models.Event.objects.all", "line_number": 9, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 9, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 9, "usage_type": "name"}, {"api_name": "serializer.EventSerializer", "line_number": 10, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 11, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 11, "usage_type": "attribute"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 7, "usage_type": "call"}, {"api_name": "models.Event.objects.get", "line_number": 15, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 15, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 15, "usage_type": "name"}, {"api_name": "serializer.EventSerializer", "line_number": 16, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 17, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 17, "usage_type": "attribute"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 13, "usage_type": "call"}, {"api_name": "serializer.EventSerializer", "line_number": 21, "usage_type": "call"}, {"api_name": "serializer.is_valid", "line_number": 23, "usage_type": "call"}, {"api_name": "serializer.save", "line_number": 24, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 25, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 25, "usage_type": "attribute"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 19, "usage_type": "call"}, {"api_name": "models.Event.objects.get", "line_number": 30, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 30, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 30, "usage_type": "name"}, {"api_name": "serializer.EventSerializer", "line_number": 31, "usage_type": "call"}, {"api_name": "serializer.is_valid", "line_number": 33, "usage_type": "call"}, {"api_name": "serializer.save", "line_number": 34, "usage_type": "call"}, {"api_name": "rest_framework.response.Response", "line_number": 35, "usage_type": "call"}, {"api_name": "serializer.data", "line_number": 35, "usage_type": "attribute"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 28, "usage_type": "call"}, {"api_name": 
"models.Event.objects.get", "line_number": 39, "usage_type": "call"}, {"api_name": "models.Event.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "models.Event", "line_number": 39, "usage_type": "name"}, {"api_name": "rest_framework.response.Response", "line_number": 42, "usage_type": "call"}, {"api_name": "rest_framework.decorators.api_view", "line_number": 37, "usage_type": "call"}]} +{"seq_id": "241405953", "text": "import glob\nimport numpy as np\nimport config\nimport os\n\n# find files in the raw directory\nfiles = glob.glob(\"raw/raw*\")\n\nos.system(\"rm output/*\")\n\n\n# number of arguments to expect per line\nkargs = 14\nksensors = 12\n\n# buffer for duplicate timestamps\nbuf_value = 100000\n\n# take raw data files and convert to a dictionary of timestamps\n# each timestamp has an array of packets.\ndef parse_data(files):\n cycles = []\n # the data dictionary used to store all relevant values\n data = {}\n \n # iterate through all generated raw files\n for file in files:\n\n f = open(file, 'r')\n raw = f.readlines()\n f.close()\n\n # initialize buffer to zero, this is used to increment the timestamps if\n # a reset occurs during the reading.\n buf = 0\n\n # traverse through the file and add good data to dictionary\n for line in raw:\n # check if timestamp reset, add a fixed value\n if str(line[0]) == \"t\":\n # increment by a large enough value, say 1000\n buf += buf_value\n # check if line begins with correct letter\n if line[0] != \"I\":\n continue\n\n # split line by whitespace\n line = line.split()\n\n # verify line is the correct length\n if len(line) != kargs:\n continue\n\n # create pseudo timestamp\n time = int(buf) + int(line[3])\n\n # populate packet with this line's info. this will be appended to the\n # data dictionary at the given timestamp.\n packet = {}\n packet[\"id\"] = line[1]\n packet[\"led_state\"] = line[5]\n packet[\"r\"] = line[7]\n packet[\"g\"] = line[9]\n packet[\"b\"] = line[11]\n packet[\"c\"] = line[13]\n\n # add packet to appropriate time in the data dictionary\n if time in data.keys():\n data[time].append(packet)\n else:\n data[time] = []\n data[time].append(packet)\n\n # trim data to only contain timestamps for which we have all data.\n # if that timestamp has exactly ksensors number of packets,\n # then we have received all the data for that timestamp.\n bad_times = []\n for time in data.keys():\n if len(data[time]) != ksensors:\n bad_times.append(time)\n\n for time in bad_times:\n del data[time]\n\n\n return data\n\n# take sensor-wise state values and determine true state id\n# from the configured lighting pattern\ndef determine_state(vec, mem):\n mat = config.CFG[\"state_matrix\"]\n \n krow, kcol = mat.shape\n # array used to find rows which match the state\n match = np.array([0]*krow)\n \n i = 0\n for row in mat:\n if (row == vec).all():\n match[i] = 1\n i += 1\n\n match_indices = np.array(match.nonzero()).flatten()\n if len(match_indices) == 1:\n return match_indices[0]\n elif len(match_indices) == 0:\n print(\"======= ERROR: state not found\")\n return -1\n\n\n inc = 0\n # continue looking until we have gone through all memory\n while (inc < len(mem)):\n mem_match_indices = [] # populate with current step match\n prev_mem_val = mem[-inc] # get mem row corresponding to step\n\n for match in match_indices:\n prev_mem_loc = match - inc\n row = mat[prev_mem_loc,:]\n if (row == prev_mem_val).all():\n # if this previous value is consistent\n # add to new match list\n mem_match_indices.append(prev_mem_loc)\n\n if 
len(mem_match_indices) == 1:\n return mem_match_indices[0]\n match_indices = mem_match_indices\n # go back another step\n inc += 1\n print(\"warning, skipping additional matches\")\n if len(match_indices) > 0:\n return match_indices[0]\n else:\n print(\"Could not accurately find state id\")\n return None\n \n\n# send parsed to one master file\ndef save_to_file(data, filename):\n # iterate through all timestamps and generate strings to print\n inc = 0\n mem = []\n\n file_num = 0\n \n this_file = filename + \"_\" + str(file_num) + \".txt\"\n print(\"created file: \", this_file)\n f = open(this_file, \"w\")\n \n for time in data.keys():\n \n this_time = data[time]\n output = {}\n # initialize state vector to populate with sensor-wise state info.\n # this will be compared against the expected pattern in the config\n # file to get the state that the system is in at this timestamp\n state_vec = np.array([-1]*ksensors)\n # get data from each sensor at the given timestamp.\n for i in range(0, ksensors):\n sensor = this_time[i]\n this_id = sensor[\"id\"]\n\n # get specific sensor state and update state_vec\n state = sensor[\"led_state\"]\n \n state_vec[int(this_id)] = int(state)\n\n # generate output string\n output[int(this_id)] = \"\\tID: \" + this_id + \"\\tState: \" + state + \" R: \" + sensor[\"r\"] + \" G: \" + sensor[\"g\"] + \" B: \" + sensor[\"b\"] + \" C: \" + sensor[\"c\"]+\"\\n\"\n\n # get true state id from the configured lighting pattern\n state_id = determine_state(state_vec, mem)\n if state_id == 0:\n this_file = filename + \"_\" + str(file_num) + \".txt\"\n print(\"created file: \", this_file)\n f.close()\n f = open(this_file, \"w\")\n file_num += 1\n mem.append(state_vec)\n if len(mem) > 2*ksensors:\n mem.pop(0)\n\n # write data\n # get rid of excess buffer created\n time = time % buf_value\n line = \"Timestamp: \" + str(time) + \" State ID: \" + str(state_id)+\"\\n\"\n f.write(line)\n for i in range(0,ksensors):\n f.write(output[i])\n f.close()\n\nif __name__ == \"__main__\":\n # create dictionary of all timestamps\n my_data = parse_data(files)\n\n # format to strings and write to file\n save_to_file(my_data, \"output/output\")\n", "sub_path": "usb_serial/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 6069, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "glob.glob", "line_number": 7, "usage_type": "call"}, {"api_name": "os.system", "line_number": 9, "usage_type": "call"}, {"api_name": "config.CFG", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 157, "usage_type": "call"}]} +{"seq_id": "635209262", "text": "# setup for flask app\nfrom flask import Flask, app\nfrom flask_sqlalchemy import SQLAlchemy\nfrom os import path\nfrom flask_login import LoginManager, login_manager\n\ndb = SQLAlchemy() # init db\nDB_NAME = 'database.db'\n\n\n# creates the flask app\ndef create_app():\n app = Flask(__name__)\n # for session cookies and other info\n app.config['SECRET_KEY'] = 'secret key for cookies'\n # creat the db and configure it\n app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'\n db.init_app(app) # init db with current app\n\n\n from .routes import routes\n from .auth import auth\n\n app.register_blueprint(routes, url_prefix='/')\n app.register_blueprint(auth, url_prefix='/')\n \n # we 
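# determine_state above looks for the rows of the configured state matrix
# that equal the current sensor vector. The core numpy idiom on its own,
# with a toy 3-sensor matrix:
import numpy as np

state_matrix = np.array([[0, 0, 1],
                         [0, 1, 0],
                         [1, 0, 0]])
vec = np.array([0, 1, 0])

matches = np.where((state_matrix == vec).all(axis=1))[0]
print(matches)                     # [1] -> the system is in state 1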
want to run or get the db before UI\n from .models import User, Note\n \n login_manager = LoginManager()\n login_manager.login_view = 'auth.login'\n login_manager.init_app(app)\n \n # how to load the user\n # loads user by the id that is passed in\n @login_manager.user_loader\n def load_user(id):\n return User.query.get(int(id))\n\n create_database(app)\n\n return app\n\n\ndef create_database(app):\n if not path.exists(\"website/\" + DB_NAME):\n db.create_all(app=app)\n print(\"Created Dataabse!\")\n", "sub_path": "website/__init__.py", "file_name": "__init__.py", "file_ext": "py", "file_size_in_byte": 1249, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask_sqlalchemy.SQLAlchemy", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.Flask", "line_number": 13, "usage_type": "call"}, {"api_name": "flask.app.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.app", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.app.config", "line_number": 17, "usage_type": "attribute"}, {"api_name": "flask.app", "line_number": 17, "usage_type": "name"}, {"api_name": "flask.app", "line_number": 18, "usage_type": "argument"}, {"api_name": "flask.app.register_blueprint", "line_number": 24, "usage_type": "call"}, {"api_name": "routes.routes", "line_number": 24, "usage_type": "argument"}, {"api_name": "flask.app", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.app.register_blueprint", "line_number": 25, "usage_type": "call"}, {"api_name": "auth.auth", "line_number": 25, "usage_type": "argument"}, {"api_name": "flask.app", "line_number": 25, "usage_type": "name"}, {"api_name": "flask_login.login_manager", "line_number": 30, "usage_type": "name"}, {"api_name": "flask_login.LoginManager", "line_number": 30, "usage_type": "call"}, {"api_name": "flask_login.login_manager.login_view", "line_number": 31, "usage_type": "attribute"}, {"api_name": "flask_login.login_manager", "line_number": 31, "usage_type": "name"}, {"api_name": "flask_login.login_manager.init_app", "line_number": 32, "usage_type": "call"}, {"api_name": "flask.app", "line_number": 32, "usage_type": "argument"}, {"api_name": "flask_login.login_manager", "line_number": 32, "usage_type": "name"}, {"api_name": "models.User.query.get", "line_number": 38, "usage_type": "call"}, {"api_name": "models.User.query", "line_number": 38, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 38, "usage_type": "name"}, {"api_name": "flask_login.login_manager.user_loader", "line_number": 36, "usage_type": "attribute"}, {"api_name": "flask_login.login_manager", "line_number": 36, "usage_type": "name"}, {"api_name": "flask.app", "line_number": 40, "usage_type": "argument"}, {"api_name": "flask.app", "line_number": 42, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 46, "usage_type": "call"}, {"api_name": "os.path", "line_number": 46, "usage_type": "name"}, {"api_name": "flask.app", "line_number": 47, "usage_type": "name"}]} +{"seq_id": "448539501", "text": "#-*- coding:utf-8 -*-\n\"\"\"\n---\n #modified birth From DateTime to Date,2015-12-24\n #add family history field,2015-12-24\n---\n\n\"\"\"\nimport uuid\nfrom django.db import models\n\nclass Patient(models.Model):\n sex_choices = (\n ('M',u'男'),\n ('F',u'女'),\n )\n uuid = models.UUIDField(primary_key=True,default=uuid.uuid4,editable=False)\n name = 
models.CharField(max_length=30)\n clinic_number =models.PositiveIntegerField(unique=True)\n birth = models.DateField()\n sex = models.CharField(max_length=1,choices=sex_choices)\n date_joined = models.DateField(auto_now_add=True)\n family_history = models.CharField(max_length=100,blank=True)\n \n def __unicode__(self):\n return self.name\n\nRelative_Choices = (\n(u\"Father\",u\"父亲\"),\n(u\"Mother\",u\"母亲\"),\n(u\"Brother\",u\"兄弟\"),\n(u\"Sister\",u\"姐妹\"),\n(u\"Son\",u\"儿子\"),\n(u\"Daughter\",u\"女儿\"),\n(u\"Husband\",u\"丈夫\"),\n(u\"Wife\",u\"妻子\"),)\n\nclass Relation(models.Model):\n person = models.ForeignKey(Patient,related_name=\"person\")\n relative = models.ForeignKey(Patient,related_name=\"relation_set\")\n relation = models.CharField(max_length=10,choices=Relative_Choices)\n priority = models.IntegerField(default=1)\n \n def __unicode__(self):\n return self.person.name\n\n", "sub_path": "Patients/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 1279, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.db.models.Model", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.UUIDField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}, {"api_name": "uuid.uuid4", "line_number": 17, "usage_type": "attribute"}, {"api_name": "django.db.models.CharField", "line_number": 18, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 18, "usage_type": "name"}, {"api_name": "django.db.models.PositiveIntegerField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 22, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 22, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 23, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 23, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 38, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 40, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 40, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 41, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 41, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 42, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 42, "usage_type": "name"}]} +{"seq_id": "89818258", "text": "import random\n\nfrom blessed import Terminal\n\nterm = Terminal()\n\n\ndef grass() -> str:\n \"\"\"Returns random Grass Unicode character\"\"\"\n return getattr(term, 'on_'+random.choice(green))(term.chartreuse3('▓'))\n\n\ndef tree() -> 
str:\n    \"\"\"Returns random tree Unicode character\"\"\"\n    return getattr(term, 'on_'+random.choice(green))(random.choice('🌲🌳🌴'))\n\n\ndef water() -> str:\n    \"\"\"Returns random water Unicode character\"\"\"\n    return term.steelblue4_on_steelblue2(random.choice('  '))\n\n\ngreen = [\n    'chartreuse',\n    'chartreuse2',\n    'chartreuse3',\n]\n\nd = {\n    'grass': grass,\n    'tree': tree,\n    'water': water\n}\n\nfreq_weights = {\n    'grass': 3,\n    'tree': 2,\n    'water': 1\n}\n\nprint(term.home+term.clear, end='')\nw, h = term.width, term.height\n\nfor j in range(1, h, 3):\n    for i in range(1, w, 3):\n        typ, obj = random.choices(list(d.items()), weights=(90, 8, 2))[0]\n\n        print(term.move_xy(i, j) + obj())\n        chances = [(96, 3, 1), (75, 21, 4)]\n        ix = [(x, y) for x in range(-1, 2) for y in range(-1, 2) if not (x == 0 and y == 0)]\n\n        for x, y in ix:\n            p, q = i + x, j + y\n            st = set(d.keys())\n            st.discard(typ)\n            ls = obj(), *map(lambda x: d[x](), sorted(st, key=lambda x: freq_weights[x], reverse=True))\n            if p in range(w) and q in range(h-1):\n                print(term.move_xy(p, q) + random.choices(ls, weights=chances[0])[0])\n\n'''\nFor reference only\n\n▓ \t▒ \t░\n\n░▒▓█▇▆▅▄▃▂\n▂▃▅▇█▓▒░۩۞۩ ۩۞۩░▒▓█▇▅▃▂\n▂ ▃ ▄ ▅ ▆ ▇ █▓▒░ ░▒▓█▇▆▅ ▄▃▂ ▂ ▃ ▄ ▅ ▆ ▇ █▓▒░ ░▒\n'''\n\n# 🌲\n# evergreen tree\n\n# U+1F332\n\n# 🌳\n# deciduous tree\n\n# U+1F333\n\n# 🌴\n# palm tree\n\n# U+1F334\n\n# 🍁\n# maple leaf\n\n# U+1F341\n\n# 🎄\n# christmas tree\n\n# U+1F384\n\n# 🎋\n# tanabata tree\n\n# U+1F38B\n", "sub_path": "samples/map.py", "file_name": "map.py", "file_ext": "py", "file_size_in_byte": 1888, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "blessed.Terminal", "line_number": 5, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 10, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 15, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 20, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 46, "usage_type": "call"}, {"api_name": "random.choices", "line_number": 58, "usage_type": "call"}]} +{"seq_id": "506956319", "text": "'''\nselenium: crawl pages without logging in, by loading saved cookies\n\nLoading the cookies works, but facebook still blocks the session and asks for the password\n\nCreated on 2020-05-04\n@author: rocky\n'''\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport time\nfrom selenium.webdriver.common.keys import Keys\nimport pickle\n\n\ndef main():\n    \n    \n    options = Options()\n    # options.add_argument(\"--headless\")\n    driver = webdriver.Chrome(options=options)\n    \n    cookies = pickle.load(open(\"cookies.pkl\", \"rb\"))\n    print(cookies)\n    driver.get(\"http://www.facebook.com\")\n    for cookie in cookies:\n        print(cookie)\n        cookie['expiry'] = int(cookie['expiry']) # fixes invalid argument: invalid 'expiry' - the stored value can be a float, so convert it to int \n        driver.add_cookie(cookie)\n    \n    driver.get(\"http://www.facebook.com\") \n    \n    time.sleep(15)\n    \n    print(\"completed\")\n\n\nif __name__ == \"__main__\":\n    main()", "sub_path": "scrapyDemo/cookieDemo5.py", "file_name": "cookieDemo5.py", "file_ext": "py", "file_size_in_byte": 937, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "selenium.webdriver.chrome.options.Options", "line_number": 19, "usage_type": "call"}, {"api_name": "selenium.webdriver.Chrome", "line_number": 21, "usage_type": "call"}, {"api_name": "selenium.webdriver", "line_number": 21, "usage_type": "name"}, {"api_name": "pickle.load", "line_number": 23, "usage_type": "call"}, {"api_name": 
"time.sleep", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "536090454", "text": "# -*- coding: utf-8 -*-\n\n\"\"\"\nmorphsnakes\n===========\n\nThis is a Python implementation of the algorithms introduced in the paper\n\n Márquez-Neila, P., Baumela, L., Álvarez, L., \"A morphological approach\n to curvature-based evolution of curves and surfaces\". IEEE Transactions\n on Pattern Analysis and Machine Intelligence (PAMI), 2013.\n\nThis implementation is intended to be as brief, understandable and self-contained\nas possible. It does not include any enhancement to make it fast or efficient.\n\nAny practical implementation of this algorithm should work only over the\nneighbor pixels of the 0.5-levelset, not over all the embedding function,\nand perhaps should feature multi-threading or GPU capabilities.\n\nThe classes MorphGAC and MorphACWE provide most of the functionality of this\nmodule. They implement the Morphological Geodesic Active Contours and the\nMorphological Active Contours without Edges, respectively. See the\naforementioned paper for full details.\n\nSee test.py for examples of usage.\n\nLICENSE AND COPYRIGHT APPLICABLE TO MORPHSNAKES ONLY\nCopyright (c) 2013-2015, P. M. Neila\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n* Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n* Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n* Neither the name of morphsnakes nor the names of its\n contributors may be used to endorse or promote products derived from\n this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\nFOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\nOR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\nOF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\n__author__ = \"P. 
Márquez Neila \"\n\nfrom itertools import cycle\n\nimport numpy as np\nfrom scipy import ndimage\nfrom scipy.ndimage import binary_dilation, binary_erosion, \\\n gaussian_filter, gaussian_gradient_magnitude\n\nclass fcycle(object):\n \n def __init__(self, iterable):\n \"\"\"Call functions from the iterable each time it is called.\"\"\"\n self.funcs = cycle(iterable)\n \n def __call__(self, *args, **kwargs):\n f = next(self.funcs)\n return f(*args, **kwargs)\n \n\n# SI and IS operators for 2D and 3D.\n_P2 = [np.eye(3), np.array([[0,1,0]]*3), np.flipud(np.eye(3)), np.rot90([[0,1,0]]*3)]\n_P3 = [np.zeros((3,3,3)) for i in range(9)]\n\n_P3[0][:,:,1] = 1\n_P3[1][:,1,:] = 1\n_P3[2][1,:,:] = 1\n_P3[3][:,[0,1,2],[0,1,2]] = 1\n_P3[4][:,[0,1,2],[2,1,0]] = 1\n_P3[5][[0,1,2],:,[0,1,2]] = 1\n_P3[6][[0,1,2],:,[2,1,0]] = 1\n_P3[7][[0,1,2],[0,1,2],:] = 1\n_P3[8][[0,1,2],[2,1,0],:] = 1\n\n_aux = np.zeros((0), dtype=np.float32)\ndef SI(u):\n \"\"\"SI operator.\"\"\"\n global _aux\n if np.ndim(u) == 2:\n P = _P2\n elif np.ndim(u) == 3:\n P = _P3\n else:\n raise ValueError(\"u has an invalid number of dimensions (should be 2 or 3)\")\n \n if u.shape != _aux.shape[1:]:\n _aux = np.zeros((len(P),) + u.shape, dtype=np.float32)\n \n for _aux_i, P_i in zip(_aux, P):\n _aux_i[:] = binary_erosion(u, P_i)\n \n return _aux.max(0)\n\ndef IS(u):\n \"\"\"IS operator.\"\"\"\n global _aux\n if np.ndim(u) == 2:\n P = _P2\n elif np.ndim(u) == 3:\n P = _P3\n else:\n raise ValueError(\"u has an invalid number of dimensions (should be 2 or 3)\")\n \n if u.shape != _aux.shape[1:]:\n _aux = np.zeros((len(P),) + u.shape, dtype=np.float32)\n \n for _aux_i, P_i in zip(_aux, P):\n _aux_i[:] = binary_dilation(u, P_i)\n \n return _aux.min(0)\n\n# SIoIS operator.\nSIoIS = lambda u: SI(IS(u))\nISoSI = lambda u: IS(SI(u))\ncurvop = fcycle([SIoIS, ISoSI])\n\n# Stopping factors (function g(I) in the paper).\ndef gborders(img, alpha=1.0, sigma=1.0):\n \"\"\"Stopping criterion for image borders.\"\"\"\n # The norm of the gradient.\n gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')\n return 1.0/np.sqrt(1.0 + alpha*gradnorm)\n\ndef glines(img, sigma=1.0):\n \"\"\"Stopping criterion for image black lines.\"\"\"\n return gaussian_filter(img, sigma)\n\nclass MorphACWE(object):\n \"\"\"Morphological ACWE based on the Chan-Vese energy functional.\"\"\"\n \n def __init__(self, data, smoothing=1, lambda1=1, lambda2=1):\n \"\"\"Create a Morphological ACWE solver.\n \n Parameters\n ----------\n data : ndarray\n The image data.\n smoothing : scalar\n The number of repetitions of the smoothing step (the\n curv operator) in each iteration. In other terms,\n this is the strength of the smoothing. 
This is the\n parameter µ.\n lambda1, lambda2 : scalars\n Relative importance of the inside pixels (lambda1)\n against the outside pixels (lambda2).\n \"\"\"\n self._u = None\n self.smoothing = smoothing\n self.lambda1 = lambda1\n self.lambda2 = lambda2\n \n self.data = data\n \n def set_levelset(self, u):\n self._u = np.float32(u)\n self._u[u>0] = 1\n self._u[u<=0] = 0\n \n levelset = property(lambda self: self._u,\n set_levelset,\n doc=\"The level set embedding function (u).\")\n \n def step(self):\n \"\"\"Perform a single step of the morphological Chan-Vese evolution.\"\"\"\n # Assign attributes to local variables for convenience.\n u = self._u\n \n if u is None:\n raise ValueError(\"the levelset function is not set (use set_levelset)\")\n \n data = self.data\n\n # Determine c0 and c1.\n inside = u>0\n outside = u<=0\n c0 = data[outside].sum() / np.float32(outside.sum())\n c1 = data[inside].sum() / np.float32(inside.sum())\n\n # Image attachment.\n abs_dres = np.abs(np.array(np.gradient(u))).sum(0)\n #aux = abs_dres * (c0 - c1) * (c0 + c1 - 2*data)\n aux = abs_dres * (self.lambda1*(data - c1)**2 - self.lambda2*(data - c0)**2)\n \n res = np.copy(u)\n res[aux < 0] = 1\n res[aux > 0] = 0\n\n # Smoothing.\n for i in range(self.smoothing):\n res = curvop(res)\n \n self._u = res\n \n def run(self, iterations):\n \"\"\"Run several iterations of the morphological Chan-Vese method.\"\"\"\n for i in range(iterations):\n self.step()\n \n\nclass MorphGAC(object):\n \"\"\"Morphological GAC based on the Geodesic Active Contours.\"\"\"\n \n def __init__(self, data, smoothing=1, threshold=0, balloon=0):\n \"\"\"Create a Morphological GAC solver.\n \n Parameters\n ----------\n data : array-like\n The stopping criterion g(I). See functions gborders and glines.\n smoothing : scalar\n The number of repetitions of the smoothing step in each\n iteration. This is the parameter µ.\n threshold : scalar\n The threshold that determines which areas are affected\n by the morphological balloon. This is the parameter θ.\n balloon : scalar\n The strength of the morphological balloon. 
This is the parameter ν.\n \"\"\"\n self._u = None\n self._v = balloon\n self._theta = threshold\n self.smoothing = smoothing\n \n self.set_data(data)\n \n def set_levelset(self, u):\n self._u = np.float32(u)\n self._u[u>0] = 1\n self._u[u<=0] = 0\n \n def set_balloon(self, v):\n self._v = v\n self._update_mask()\n \n def set_threshold(self, theta):\n self._theta = theta\n self._update_mask()\n \n def set_data(self, data):\n self._data = data\n self._ddata = np.gradient(data)\n self._update_mask()\n # The structure element for binary dilation and erosion.\n self.structure = np.ones((3,)*np.ndim(data))\n \n def _update_mask(self):\n \"\"\"Pre-compute masks for speed.\"\"\"\n self._threshold_mask = self._data > self._theta\n self._threshold_mask_v = self._data > self._theta/np.abs(self._v)\n \n levelset = property(lambda self: self._u,\n set_levelset,\n doc=\"The level set embedding function (u).\")\n data = property(lambda self: self._data,\n set_data,\n doc=\"The data that controls the snake evolution (the image or g(I)).\")\n balloon = property(lambda self: self._v,\n set_balloon,\n doc=\"The morphological balloon parameter (ν (nu, not v)).\")\n threshold = property(lambda self: self._theta,\n set_threshold,\n doc=\"The threshold value (θ).\")\n \n def step(self):\n \"\"\"Perform a single step of the morphological snake evolution.\"\"\"\n # Assign attributes to local variables for convenience.\n u = self._u\n gI = self._data\n dgI = self._ddata\n theta = self._theta\n v = self._v\n \n if u is None:\n raise ValueError(\"the levelset is not set (use set_levelset)\")\n \n res = np.copy(u)\n \n # Balloon.\n if v > 0:\n aux = binary_dilation(u, self.structure)\n elif v < 0:\n aux = binary_erosion(u, self.structure)\n if v!= 0:\n res[self._threshold_mask_v] = aux[self._threshold_mask_v]\n \n # Image attachment.\n aux = np.zeros_like(res, dtype=np.float32)\n dres = np.gradient(res)\n for el1, el2 in zip(dgI, dres):\n aux += el1*el2\n res[aux > 0] = 1\n res[aux < 0] = 0\n \n # Smoothing.\n for i in range(self.smoothing):\n res = curvop(res)\n \n self._u = res\n \n def run(self, iterations):\n \"\"\"Run several iterations of the morphological snakes method.\"\"\"\n for i in range(iterations):\n self.step()\n \n\ndef evolve_visual(msnake, levelset=None, num_iters=20, background=None):\n \"\"\"\n Visual evolution of a morphological snake.\n \n Parameters\n ----------\n msnake : MorphGAC or MorphACWE instance\n The morphological snake solver.\n levelset : array-like, optional\n If given, the levelset of the solver is initialized to this. 
If not\n given, the evolution will use the levelset already set in msnake.\n num_iters : int, optional\n The number of iterations.\n background : array-like, optional\n If given, background will be shown behind the contours instead of\n msnake.data.\n \"\"\"\n from matplotlib import pyplot as ppl\n \n if levelset is not None:\n msnake.levelset = levelset\n \n # Prepare the visual environment.\n fig = ppl.gcf()\n fig.clf()\n ax1 = fig.add_subplot(1,2,1)\n if background is None:\n ax1.imshow(msnake.data, cmap=ppl.cm.gray)\n else:\n ax1.imshow(background, cmap=ppl.cm.gray)\n ax1.contour(msnake.levelset, [0.5], colors='r')\n \n ax2 = fig.add_subplot(1,2,2)\n ax_u = ax2.imshow(msnake.levelset)\n ppl.pause(0.001)\n \n # Iterate.\n for i in range(num_iters):\n # Evolve.\n msnake.step()\n \n # Update figure.\n del ax1.collections[0]\n ax1.contour(msnake.levelset, [0.5], colors='r')\n ax_u.set_data(msnake.levelset)\n fig.canvas.draw()\n #ppl.pause(0.001)\n \n # Return the last levelset.\n return msnake.levelset\n\ndef evolve_visual3d(msnake, levelset=None, num_iters=20):\n \"\"\"\n Visual evolution of a three-dimensional morphological snake.\n \n Parameters\n ----------\n msnake : MorphGAC or MorphACWE instance\n The morphological snake solver.\n levelset : array-like, optional\n If given, the levelset of the solver is initialized to this. If not\n given, the evolution will use the levelset already set in msnake.\n num_iters : int, optional\n The number of iterations.\n \"\"\"\n from mayavi import mlab\n import matplotlib.pyplot as ppl\n \n if levelset is not None:\n msnake.levelset = levelset\n \n fig = mlab.gcf()\n mlab.clf()\n src = mlab.pipeline.scalar_field(msnake.data)\n mlab.pipeline.image_plane_widget(src, plane_orientation='x_axes', colormap='gray')\n cnt = mlab.contour3d(msnake.levelset, contours=[0.5])\n \n @mlab.animate(ui=True)\n def anim():\n for i in range(num_iters):\n msnake.step()\n cnt.mlab_source.scalars = msnake.levelset\n print(\"Iteration %s/%s...\" % (i + 1, num_iters))\n yield\n \n anim()\n mlab.show()\n \n # Return the last levelset.\n return msnake.levelset\n", "sub_path": "morphsnakes.py", "file_name": "morphsnakes.py", "file_ext": "py", "file_size_in_byte": 13511, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "itertools.cycle", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.eye", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.flipud", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.rot90", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 91, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 91, "usage_type": "attribute"}, {"api_name": "numpy.ndim", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 97, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 103, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 103, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.binary_erosion", "line_number": 106, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 113, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 121, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 121, "usage_type": 
"attribute"}, {"api_name": "scipy.ndimage.binary_dilation", "line_number": 124, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_gradient_magnitude", "line_number": 137, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 138, "usage_type": "call"}, {"api_name": "scipy.ndimage.gaussian_filter", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 171, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 193, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 196, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.gradient", "line_number": 257, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.ndim", "line_number": 260, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 265, "usage_type": "call"}, {"api_name": "numpy.copy", "line_number": 292, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_dilation", "line_number": 296, "usage_type": "call"}, {"api_name": "scipy.ndimage.binary_erosion", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.zeros_like", "line_number": 303, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 303, "usage_type": "attribute"}, {"api_name": "numpy.gradient", "line_number": 304, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.gcf", "line_number": 345, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 345, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 349, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 349, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 351, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 351, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.pause", "line_number": 356, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 356, "usage_type": "name"}, {"api_name": "mayavi.mlab.gcf", "line_number": 393, "usage_type": "call"}, {"api_name": "mayavi.mlab", "line_number": 393, "usage_type": "name"}, {"api_name": "mayavi.mlab.clf", "line_number": 394, "usage_type": "call"}, {"api_name": "mayavi.mlab", "line_number": 394, "usage_type": "name"}, {"api_name": "mayavi.mlab.pipeline.scalar_field", "line_number": 395, "usage_type": "call"}, {"api_name": "mayavi.mlab.pipeline", "line_number": 395, "usage_type": "attribute"}, {"api_name": "mayavi.mlab", "line_number": 395, "usage_type": "name"}, {"api_name": "mayavi.mlab.pipeline.image_plane_widget", "line_number": 396, "usage_type": "call"}, {"api_name": "mayavi.mlab.pipeline", "line_number": 396, "usage_type": "attribute"}, {"api_name": "mayavi.mlab", "line_number": 396, "usage_type": "name"}, {"api_name": "mayavi.mlab.contour3d", "line_number": 397, "usage_type": "call"}, {"api_name": "mayavi.mlab", "line_number": 397, "usage_type": "name"}, {"api_name": "mayavi.mlab.animate", "line_number": 399, "usage_type": "call"}, {"api_name": "mayavi.mlab", "line_number": 399, "usage_type": "name"}, {"api_name": "mayavi.mlab.show", "line_number": 408, "usage_type": "call"}, {"api_name": "mayavi.mlab", 
"line_number": 408, "usage_type": "name"}]} +{"seq_id": "11722211", "text": "\"\"\"\nHello World Client\nConnects REQ socket to tcp://localhost:5555\nSends \"Hello\" to Server and expects \"World\" back.\n\"\"\"\nimport zmq\n\ncontext = zmq.Context()\nprint(\"Connecting to Server\")\nsocket = context.socket(zmq.REQ)\nsocket.connect(\"tcp://localhost:5555\")\n\nprint(\"Sending Request\")\nsocket.send(b\"Hello\")\nmessage =socket.recv()\nprint(\"Recieved Reply %s\" % message)\n", "sub_path": "client.py", "file_name": "client.py", "file_ext": "py", "file_size_in_byte": 367, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "zmq.Context", "line_number": 8, "usage_type": "call"}, {"api_name": "zmq.REQ", "line_number": 10, "usage_type": "attribute"}]} +{"seq_id": "614585731", "text": "# -*- coding: utf-8 -*-\n\n'''A group of Line objects.\n'''\n\nimport logging\nimport string\nfrom .Line import Line\nfrom .TextSpan import TextSpan\nfrom ..image.ImageSpan import ImageSpan\nfrom ..common.Collection import ElementCollection\nfrom ..common.share import TextAlignment\nfrom ..common import constants\n\n\nclass Lines(ElementCollection):\n '''Collection of text lines.'''\n\n @property\n def unique_parent(self):\n '''Whether all contained lines have same parant.'''\n if not bool(self): return False\n\n first_line = self._instances[0]\n return all(line.same_source_parent(first_line) for line in self._instances)\n\n\n def append(self, line:Line):\n \"\"\"Override. Append a line and update line pid and parent bbox.\n\n Args:\n line (Line): Target line to add.\n \"\"\"\n super().append(line)\n\n # update original parent id\n if not self._parent is None:\n line.pid = id(self._parent)\n\n\n def restore(self, raws:list):\n '''Construct lines from raw dicts list.'''\n for raw in raws:\n line = Line(raw)\n self.append(line)\n return self\n\n\n @property\n def image_spans(self):\n '''Get all ImageSpan instances.'''\n spans = []\n for line in self._instances:\n spans.extend(line.image_spans)\n return spans\n\n \n def join(self, line_overlap_threshold:float, line_merging_threshold:float):\n '''Merge lines aligned horizontally, e.g. 
 make an inline image a span in a text line.'''\n        # skip if empty\n        if not self._instances: return self\n\n        # lines are valid to merge on condition that every two lines are in the same row\n        def valid_joining_lines(line, candidates):\n            return all(line.in_same_row(_line) for _line in candidates)\n        \n        # merge lines\n        def get_merged_line(candidates):\n            line = candidates[0]\n            for c_line in candidates[1:]:\n                line.add(c_line.spans)\n            return line\n\n        # sort lines\n        self.sort()\n\n        # check each line\n        lines = Lines()\n        candidates = [self._instances[0]] # first line\n        for i in range(1, len(self._instances)):\n            pre_line, line = self._instances[i-1], self._instances[i]\n            \n            # ignore this line if it overlaps with the previous line\n            if line.get_main_bbox(pre_line, threshold=line_overlap_threshold):\n                logging.warning('Ignore Line \"%s\" due to overlap', line.text)\n                continue\n\n            # add line directly if not aligned horizontally with previous line\n            if not line.in_same_row(pre_line):\n                to_join_line = False\n\n            # if there is an obvious x-distance to the previous line,\n            # take it as a separate line as it is\n            elif abs(line.bbox.x0-pre_line.bbox.x1) > line_merging_threshold:\n                to_join_line = False \n\n            # now, this line will be appended to the previous line as a span\n            else:\n                to_join_line = True\n\n            # add line directly\n            if not to_join_line:\n                # merge candidate lines (if any)\n                if candidates: lines.append(get_merged_line(candidates))\n                candidates = []\n\n                # add this line\n                lines.append(line)\n            \n            # prepare for merging lines: valid\n            elif valid_joining_lines(line, candidates):\n                candidates.append(line)\n            \n            # prepare for merging lines: invalid -> add each line directly\n            else:\n                # release candidate lines\n                for c_line in candidates: lines.append(c_line)\n                candidates = []\n\n                # add this line\n                lines.append(line)\n\n        # NOTE: in case last group\n        if candidates: lines.append(get_merged_line(candidates))\n\n        # update lines in block\n        self.reset(lines)\n\n\n    def split_back(self):\n        '''Split lines into groups, in which all lines are from the same original text block.\n\n        Returns:\n            list: A list of Lines contained in the same original text block.\n        '''\n        fun = lambda a,b: a.same_source_parent(b)\n        groups = self.group(fun)\n\n        # NOTE: group() may destroy the order of lines, so sort in line level\n        for group in groups: group.sort()\n\n        return groups\n\n    \n    def split_vertically_by_text(self, line_break_free_space_ratio:float, new_paragraph_free_space_ratio:float):\n        '''Split lines into separate paragraphs, because ``PyMuPDF`` stores lines in ``block``,\n        rather than in real paragraphs.\n\n        ..
 note::\n            Only the normal reading direction is considered: from left to right, from top\n            to bottom.\n        '''\n        rows = self.group_by_physical_rows()\n\n        # skip if only one row\n        num = len(rows)\n        if num==1: return rows\n\n        # standard row width with first row excluded, considering potential indentation of first line\n        W = max(row[-1].bbox[2]-row[0].bbox[0] for row in rows[1:])\n        H = sum(row[0].bbox[3]-row[0].bbox[1] for row in rows) / num\n\n        # check row by row\n        res = []\n        lines = Lines()\n        punc = tuple(constants.SENTENSE_END_PUNC)\n        start_of_para = end_of_para = False # start/end of paragraph\n        start_of_sen = end_of_sen = False # start/end of sentence\n        for row in rows:\n            end_of_sen = row[-1].text.strip().endswith(punc)\n            w = row[-1].bbox[2]-row[0].bbox[0]\n\n            # end of a sentence and free space at the end -> end of paragraph\n            if end_of_sen and w/W <= 1.0-line_break_free_space_ratio:\n                end_of_para = True\n\n            # start of a sentence and free space at the start -> start of paragraph\n            elif start_of_sen and (W-w)/H >= new_paragraph_free_space_ratio:\n                start_of_para = True\n\n            # take action\n            if end_of_para:\n                lines.extend(row)\n                res.append(lines)\n                lines = Lines()\n            elif start_of_para:\n                res.append(lines)\n                lines = Lines()\n                lines.extend(row)\n            else:\n                lines.extend(row)\n\n            # for next round\n            start_of_sen = end_of_sen\n            start_of_para = end_of_para = False\n        \n        # close the action\n        if lines: res.append(lines)\n\n        return res\n\n\n    def strip(self, delete_end_line_hyphen:bool):\n        '''Remove redundant blanks of each line and update bbox accordingly.'''\n        # strip each line and update bbox: \n        # keep at least one blank at both sides in case extra blanks existed\n        strip_status = []\n        strip_status.extend([line.strip() for line in self._instances])\n        stripped = any(strip_status)\n        if stripped: self._parent.update_bbox(self.bbox) # update bbox \n\n        # word process:\n        # - it might miss a blank between words from adjacent lines\n        # - it's optional to delete the hyphen since it might not be at the line end\n        #   after conversion\n\n        punc_ex_hyphen = ''.join(c for c in string.punctuation if c!='-')\n        def is_end_of_english_word(c):\n            return c.isalnum() or (c and c in punc_ex_hyphen)\n        \n        for i, line in enumerate(self._instances[:-1]):\n            # last char in this line\n            end_span = line.spans[-1]\n            if not isinstance(end_span, TextSpan): continue\n            end_chars = end_span.chars\n            if not end_chars: continue \n            end_char = end_chars[-1]\n\n            # first char in next line\n            start_span = self._instances[i+1].spans[0]\n            if not isinstance(start_span, TextSpan): continue\n            start_chars = start_span.chars\n            if not start_chars: continue \n            next_start_char = start_chars[0] \n\n            # delete hyphen if next line starts with lower case letter\n            if delete_end_line_hyphen and \\\n                end_char.c.endswith('-') and next_start_char.c.islower(): \n                end_char.c = '' # delete hyphen in a tricky way\n\n\n            # add a space if both the last char and the first char in next line are alphabet, \n            # number, or English punctuation (excepting hyphen)\n            if is_end_of_english_word(end_char.c) and is_end_of_english_word(next_start_char.c):\n                end_char.c += ' ' # add blank in a tricky way\n        \n        return stripped\n\n\n    def sort(self):\n        '''Sort lines considering text direction.\n\n        Taking the natural reading direction as an example: reading order for rows, left to \n        right for lines within a row.\n\n        In the following example, A should come before B.\n\n        ::\n\n                         +-----------+\n            +---------+  |           |\n            |   A     |  |     B     |\n            +---------+  +-----------+\n        \n        Steps:\n\n        * Sort lines in reading order, i.e.
 from top to bottom, from left to right.\n        * Group lines in row.\n        * Sort lines in row: from left to right.\n        '''\n        # sort in reading order\n        self.sort_in_reading_order()\n\n        # split lines in separate row\n        lines_in_rows = [] # type: list[list[Line]]\n\n        for line in self._instances:\n\n            # add lines to a row group if not in same row with previous line\n            if not lines_in_rows or not line.in_same_row(lines_in_rows[-1][-1]):\n                lines_in_rows.append([line])\n            \n            # otherwise, append current row group\n            else:\n                lines_in_rows[-1].append(line)\n        \n        # sort lines in each row: consider text direction\n        idx = 0 if self.is_horizontal_text else 3\n        self._instances = []\n        for row in lines_in_rows:\n            row.sort(key=lambda line: line.bbox[idx])\n            self._instances.extend(row)\n\n\n    def is_flow_layout(self, float_layout_tolerance:float, line_separate_threshold:float):\n        '''Check if flow layout. \n        \n        A flow layout satisfies the condition that lines in each physical row have:\n        \n        * same original text block\n        * enough overlap in vertical direction.\n        * no significant gap between adjacent two lines.\n        '''\n        # group lines in same row\n        fun = lambda a, b: a.horizontally_align_with(b, factor=float_layout_tolerance) and \\\n                            not a.vertically_align_with(b, factor=constants.FACTOR_ALMOST) \n        groups = self.group(fun) \n        \n        # check each row\n        idx0, idx1 = (0, 2) if self.is_horizontal_text else (3, 1)\n        for lines in groups:\n            num = len(lines)\n            if num==1: continue\n\n            # same original parent\n            if not all(line.same_source_parent(lines[0]) for line in lines):\n                return False\n\n            # check vertical overlap\n            if not all(line.in_same_row(lines[0]) for line in lines):\n                return False\n\n            # check distance between lines\n            for i in range(1, num):\n                dis = abs(lines[i].bbox[idx0]-lines[i-1].bbox[idx1])\n                if dis >= line_separate_threshold: return False\n\n        return True\n\n\n    def parse_text_format(self, shape):\n        '''Parse text format with style represented by rectangle shape.\n        \n        Args:\n            shape (Shape): Potential style shape applied on blocks.\n        \n        Returns:\n            bool: Whether a valid text style.\n        '''\n        flag = False\n\n        for line in self._instances:\n            # any intersection in this line?\n            intsec = shape.bbox & line.get_expand_bbox(constants.MAJOR_DIST)\n            \n            if not intsec: \n                if shape.bbox.y1 < line.bbox.y0: break # lines must be sorted in advance\n                continue\n\n            # yes, then try to split the spans in this line\n            split_spans = []\n            for span in line.spans: \n                # include image span directly\n                if isinstance(span, ImageSpan): split_spans.append(span) \n\n                # split text span with the format rectangle: span-intersection-span\n                else:\n                    spans = span.split(shape, line.is_horizontal_text)\n                    split_spans.extend(spans)\n                    flag = True\n            \n            # update line spans \n            line.spans.reset(split_spans)\n\n        return flag\n\n\n    def parse_line_break(self, bbox, \n                line_break_width_ratio:float, \n                line_break_free_space_ratio:float,\n                line_condense_spacing:float):\n        '''Decide whether to hard-break each line. In addition, condense characters at the end of a line to avoid an unexpected \n        line break. PDF sets the width of each word precisely; here we just approximate by setting condensed spacing\n        for the last two words.\n\n        Args:\n            bbox (Rect): bbox of parent layout, e.g.
 page or cell.\n            line_break_width_ratio (float): user defined threshold, break the line if the width ratio is smaller than this value.\n            line_break_free_space_ratio (float): user defined threshold, break the line if the free space ratio exceeds this value.\n            line_condense_spacing (float): user defined total condensed spacing to avoid an unexpected line break.\n\n        A hard line break helps ensure paragraph structure, but pdf-based layout calculation may\n        change in docx due to different rendering mechanisms like font and spacing. For instance, when\n        one paragraph row can't accommodate a Line, the hard break leads to an unnecessary empty row.\n        Since we can't 100% ensure the same structure, it's better to focus on the content - add a line\n        break only when it's necessary, e.g. for short lines.\n        '''\n\n        block = self.parent \n        idx0, idx1 = (0, 2) if block.is_horizontal_text else (3, 1)\n        block_width = abs(block.bbox[idx1]-block.bbox[idx0])\n        layout_width = bbox[idx1] - bbox[idx0]\n\n        # hard break if the block width ratio is at or below the threshold\n        line_break = block_width/layout_width <= line_break_width_ratio\n\n        # check by each physical row\n        rows = self.group_by_physical_rows()\n        single_row = len(rows)==1\n        for lines in rows:\n            # ----------------------------\n            # line break\n            # ----------------------------\n            for line in lines: line.line_break = 0\n\n            # check the end line depending on text alignment\n            if block.alignment == TextAlignment.RIGHT:\n                end_line = lines[0]\n                free_space = abs(block.bbox[idx0]-end_line.bbox[idx0])\n            else:\n                end_line = lines[-1]\n                free_space = abs(block.bbox[idx1]-end_line.bbox[idx1])\n            \n            if block.alignment == TextAlignment.CENTER: free_space *= 2 # two-sided space\n            \n            # break line if \n            # - width ratio lower than the threshold; or \n            # - free space exceeds the threshold\n            if line_break or free_space/block_width > line_break_free_space_ratio:\n                end_line.line_break = 1\n\n            # ----------------------------\n            # character spacing\n            # ----------------------------\n            row_width = abs(lines[-1].bbox[idx1]-lines[0].bbox[idx0])\n            if block_width-row_width>constants.MINOR_DIST: continue\n            last_span = lines[-1].spans[-1]\n            if isinstance(last_span, TextSpan) and not single_row: \n                # condense characters if negative value\n                last_span.condense_spacing = line_condense_spacing\n\n        \n        # no break for last row\n        for line in rows[-1]: line.line_break = 0\n\n\n    def parse_tab_stop(self, line_separate_threshold:float):\n        '''Calculate tab stops for the parent block and whether to add a TAB stop before each line.
\n\n        Args:\n            line_separate_threshold (float): Don't need a tab stop if the line gap is less than this value.\n        '''\n        # set all tab stop positions for parent block\n        # Note these values are relative to the left boundary of parent block\n        block = self.parent \n        idx0, idx1 = (0, 2) if block.is_horizontal_text else (3, 1)\n        fun = lambda line: round(abs(line.bbox[idx0]-block.bbox[idx0]), 1)\n        all_pos = set(map(fun, self._instances))\n        block.tab_stops = list(filter(lambda pos: pos>=constants.MINOR_DIST, all_pos))\n\n        # no tab stop needed\n        if not block.tab_stops: return\n\n        # otherwise, set tab stop option for each line\n        ref = block.bbox[idx0]\n        for i, line in enumerate(self._instances):\n            # left indentation implemented with tab\n            distance = line.bbox[idx0] - ref\n            if distance>line_separate_threshold:\n                line.tab_stop = 1\n\n            # update stop reference position\n            if line==self._instances[-1]: break\n            ref = line.bbox[idx1] if line.in_same_row(self._instances[i+1]) else block.bbox[idx0]", "sub_path": "venv/Lib/site-packages/pdf2docx/text/Lines.py", "file_name": "Lines.py", "file_ext": "py", "file_size_in_byte": 17225, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "common.Collection.ElementCollection", "line_number": 16, "usage_type": "name"}, {"api_name": "Line.Line", "line_number": 28, "usage_type": "name"}, {"api_name": "Line.Line", "line_number": 44, "usage_type": "call"}, {"api_name": "logging.warning", "line_number": 85, "usage_type": "call"}, {"api_name": "common.constants.SENTENSE_END_PUNC", "line_number": 166, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 166, "usage_type": "name"}, {"api_name": "string.punctuation", "line_number": 217, "usage_type": "attribute"}, {"api_name": "TextSpan.TextSpan", "line_number": 224, "usage_type": "argument"}, {"api_name": "TextSpan.TextSpan", "line_number": 231, "usage_type": "argument"}, {"api_name": "common.constants.FACTOR_ALMOST", "line_number": 306, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 306, "usage_type": "name"}, {"api_name": "common.constants.MAJOR_DIST", "line_number": 344, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 344, "usage_type": "name"}, {"api_name": "image.ImageSpan.ImageSpan", "line_number": 354, "usage_type": "argument"}, {"api_name": "common.share.TextAlignment.RIGHT", "line_number": 407, "usage_type": "attribute"}, {"api_name": "common.share.TextAlignment", "line_number": 407, "usage_type": "name"}, {"api_name": "common.share.TextAlignment.CENTER", "line_number": 414, "usage_type": "attribute"}, {"api_name": "common.share.TextAlignment", "line_number": 414, "usage_type": "name"}, {"api_name": "common.constants.MINOR_DIST", "line_number": 426, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 426, "usage_type": "name"}, {"api_name": "TextSpan.TextSpan", "line_number": 428, "usage_type": "argument"}, {"api_name": "common.constants.MINOR_DIST", "line_number": 449, "usage_type": "attribute"}, {"api_name": "common.constants", "line_number": 449, "usage_type": "name"}]} +{"seq_id": "376341769", "text": "from collections import namedtuple\nimport requests\nimport json\n\n\nCDXParams = namedtuple('CDXParams', (\n    'url',\n    'fields',\n    'output',\n    'filter',\n    'from_timestamp',\n    'to_timestamp',\n    'limit',\n    'collapse',\n))\n\n\nclass CDXApi:\n    api_url = 'http://web.archive.org/cdx/search/cdx?'\n\n    def get_index(self, params=None):\n        cdx_url =
 self.api_url\n\n        if params:\n            if params.url:\n                cdx_url += '&url=' + params.url + '*'\n            if params.fields:\n                cdx_url += '&fields=' + params.fields\n            if params.from_timestamp:\n                cdx_url += '&from=' + params.from_timestamp\n            if params.to_timestamp:\n                cdx_url += '&to=' + params.to_timestamp\n            if params.limit:\n                cdx_url += '&limit=' + str(params.limit)\n            if params.collapse:\n                cdx_url += '&collapse=' + params.collapse\n            if params.output:\n                cdx_url += '&output=' + params.output\n            if params.filter:\n                cdx_url += '&filter=' + params.filter\n        else:\n            raise Exception('Empty CDX parameters.')\n\n        # load index from web archive\n        try:\n            res = requests.get(cdx_url, timeout=(10, 180))\n        except requests.exceptions.RequestException as e:\n            raise Exception('CDX request error: ' + repr(e))\n\n        # parse json result\n        try:\n            index = json.loads(res.content)\n        except json.JSONDecodeError as e:\n            raise Exception('Error decoding CDX json response: ' + repr(e))\n\n        # latest snapshots of files\n        latest = dict()\n\n        # drop the field-name header row returned by the CDX API\n        if len(index) > 0:\n            del index[0]\n\n        for capture in index:\n            urlkey = capture[0]\n\n            # keep only the capture with the newest timestamp for each urlkey\n            if urlkey not in latest or capture[1] > latest[urlkey][1]:\n                latest[urlkey] = capture[1:]\n        return latest\n", "sub_path": "src/cdx_api.py", "file_name": "cdx_api.py", "file_ext": "py", "file_size_in_byte": 2078, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "collections.namedtuple", "line_number": 6, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 46, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 47, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "json.JSONDecodeError", "line_number": 53, "usage_type": "attribute"}]} +{"seq_id": "515828883", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nJoint Cat & Pose model (Weighted) with Geodesic Regression model for the axis-angle representation\n\"\"\"\n\nimport torch\nfrom torch import nn, optim\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\nimport torch.nn.functional as F\n\nfrom dataGenerators import TestImages, my_collate, ImagesAll\nfrom featureModels import resnet_model\nfrom poseModels import model_3layer\nfrom axisAngle import get_error2, geodesic_loss\nfrom helperFunctions import classes, get_accuracy\n\nimport numpy as np\nimport scipy.io as spio\nimport gc\nimport os\nimport time\nimport progressbar\nimport argparse\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description='Geodesic Regression Model')\nparser.add_argument('--gpu_id', type=str, default='0')\nparser.add_argument('--save_str', type=str)\nparser.add_argument('--num_workers', type=int, default=4)\nparser.add_argument('--num_epochs', type=int, default=50)\nparser.add_argument('--db_type', type=str, default='clean')\nparser.add_argument('--init_lr', type=float, default=1e-4)\nargs = parser.parse_args()\nprint(args)\n# assign GPU\nos.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id\n\n# save stuff here\ninit_model_file = os.path.join('models', args.save_str + '_cat.tar')\nmodel_file = os.path.join('models', args.save_str + '_wgt.tar')\nresults_file = os.path.join('results', args.save_str + '_wgt_' + args.db_type)\nplots_file = os.path.join('plots', args.save_str + '_wgt_' + args.db_type)\nlog_dir = os.path.join('logs', args.save_str + '_wgt_' + args.db_type)\n\n# relevant variables\nndim = 3\nN0, N1, N2
 = 2048, 1000, 500\nnum_classes = len(classes)\nif args.db_type == 'clean':\n\tdb_path = 'data/flipped_new'\nelse:\n\tdb_path = 'data/flipped_all'\nreal_path = os.path.join(db_path, 'train')\nrender_path = 'data/renderforcnn'\ntest_path = os.path.join(db_path, 'test')\n\n# loss\nce_loss = nn.CrossEntropyLoss().cuda()\ngve_loss = geodesic_loss().cuda()\n\n# DATA\n# datasets\nreal_data = ImagesAll(real_path, 'real')\nrender_data = ImagesAll(render_path, 'render')\ntest_data = TestImages(test_path)\n# setup data loaders\nreal_loader = DataLoader(real_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\nrender_loader = DataLoader(render_data, batch_size=args.num_workers, shuffle=True, num_workers=args.num_workers, pin_memory=True, collate_fn=my_collate)\ntest_loader = DataLoader(test_data, batch_size=32)\nprint('Real: {0} \t Render: {1} \t Test: {2}'.format(len(real_loader), len(render_loader), len(test_loader)))\nmax_iterations = min(len(real_loader), len(render_loader))\n\n\n# my_model\nclass RegressionModel(nn.Module):\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.num_classes = num_classes\n\t\tself.ndim = ndim\n\t\tself.feature_model = resnet_model('resnet50', 'layer4').cuda()\n\t\tself.pose_models = nn.ModuleList([model_3layer(N0, N1, N2, ndim) for i in range(self.num_classes)]).cuda()\n\n\tdef forward(self, x, label):\n\t\tx = self.feature_model(x)\n\t\tx = torch.stack([self.pose_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)\n\t\tlabel = torch.zeros(label.size(0), self.num_classes).scatter_(1, label.data.cpu(), 1.0)\n\t\tlabel = Variable(label.unsqueeze(2).cuda())\n\t\ty = torch.squeeze(torch.bmm(x, label), 2)\n\t\ty = np.pi*F.tanh(y)\n\t\tdel x, label\n\t\treturn y\n\n\nclass JointCatPoseModel(nn.Module):\n\tdef __init__(self, oracle_model):\n\t\tsuper().__init__()\n\t\t# old stuff\n\t\tself.num_classes = oracle_model.num_classes\n\t\tself.ndim = oracle_model.ndim\n\t\tself.feature_model = oracle_model.feature_model\n\t\tself.pose_models = oracle_model.pose_models\n\t\tself.fc = nn.Linear(N0, num_classes).cuda()\n\n\tdef forward(self, x):\n\t\tx = self.feature_model(x)\n\t\ty0 = self.fc(x)\n\t\tlabel = torch.unsqueeze(F.softmax(y0, dim=1), dim=2)\n\t\ty1 = torch.stack([self.pose_models[i](x) for i in range(self.num_classes)]).permute(1, 2, 0)\n\t\ty1 = torch.squeeze(torch.bmm(y1, label), 2)\n\t\ty1 = np.pi*F.tanh(y1)\n\t\treturn [y0, y1] # cat, pose\n\n\norig_model = RegressionModel()\nmodel = JointCatPoseModel(orig_model)\nmodel.load_state_dict(torch.load(init_model_file))\n# print(model)\n\n\ndef my_schedule(ep):\n\treturn 1. / (1. 
+ ep)\n\n\noptimizer = optim.Adam(model.parameters(), lr=args.init_lr)\nscheduler = optim.lr_scheduler.LambdaLR(optimizer, my_schedule)\nwriter = SummaryWriter(log_dir)\ncount = 0\nval_err = []\nval_acc = []\n\n\ndef training():\n\tglobal count, val_acc, val_err\n\tmodel.train()\n\tbar = progressbar.ProgressBar(max_value=max_iterations)\n\tfor i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):\n\t\t# forward steps\n\t\t# output\n\t\tlabel_real = Variable(sample_real['label'].squeeze().cuda())\n\t\tydata_real = Variable(sample_real['ydata'].cuda())\n\t\txdata_real = Variable(sample_real['xdata'].cuda())\n\t\toutput_real = model(xdata_real)\n\t\toutput_cat_real = output_real[0]\n\t\typred_real = output_real[1]\n\t\tlabel_render = Variable(sample_render['label'].squeeze().cuda())\n\t\tydata_render = Variable(sample_render['ydata'].cuda())\n\t\txdata_render = Variable(sample_render['xdata'].cuda())\n\t\toutput_render = model(xdata_render)\n\t\toutput_cat_render = output_render[0]\n\t\typred_render = output_render[1]\n\t\tydata = torch.cat((ydata_real, ydata_render))\n\t\t# loss\n\t\tLc_cat = ce_loss(output_cat_real, label_real) # use only real images for category loss\n\t\ty = torch.cat((ypred_real, ypred_render))\n\t\tLr = gve_loss(y, ydata) # gve loss on final pose\n\t\tloss = 0.1*Lc_cat + Lr\n\t\t# parameter updates\n\t\toptimizer.zero_grad()\n\t\tloss.backward()\n\t\toptimizer.step()\n\t\t# store\n\t\tcount += 1\n\t\twriter.add_scalar('train_loss', loss.item(), count)\n\t\tif i % 1000 == 0:\n\t\t\tytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\n\t\t\tspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\n\t\t\ttmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\n\t\t\ttmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\n\t\t\twriter.add_scalar('val_acc', tmp_acc, count)\n\t\t\twriter.add_scalar('val_err', tmp_err, count)\n\t\t\tval_acc.append(tmp_acc)\n\t\t\tval_err.append(tmp_err)\n\t\t# cleanup\n\t\tdel label_real, ydata_real, xdata_real, output_real, output_cat_real, ypred_real\n\t\tdel label_render, ydata_render, xdata_render, output_render, output_cat_render, ypred_render\n\t\tdel\tydata, Lc_cat, Lr, loss, y\n\t\tbar.update(i+1)\n\treal_loader.dataset.shuffle_images()\n\trender_loader.dataset.shuffle_images()\n\n\ndef testing():\n\tmodel.eval()\n\tytrue_cat, ytrue_pose = [], []\n\typred_cat, ypred_pose = [], []\n\tfor i, sample in enumerate(test_loader):\n\t\txdata = Variable(sample['xdata'].cuda())\n\t\toutput = model(xdata)\n\t\toutput_cat = output[0]\n\t\toutput_pose = output[1]\n\t\ttmp_labels = np.argmax(output_cat.data.cpu().numpy(), axis=1)\n\t\typred_cat.append(tmp_labels)\n\t\tlabel = Variable(sample['label'])\n\t\tytrue_cat.append(sample['label'].squeeze().numpy())\n\t\typred_pose.append(output_pose.data.cpu().numpy())\n\t\tytrue_pose.append(sample['ydata'].numpy())\n\t\tdel xdata, label, output, sample, output_cat, output_pose\n\t\tgc.collect()\n\tytrue_cat = np.concatenate(ytrue_cat)\n\typred_cat = np.concatenate(ypred_cat)\n\tytrue_pose = np.concatenate(ytrue_pose)\n\typred_pose = np.concatenate(ypred_pose)\n\tmodel.train()\n\treturn ytrue_cat, ytrue_pose, ypred_cat, ypred_pose\n\n\ndef save_checkpoint(filename):\n\ttorch.save(model.state_dict(), filename)\n\n\nytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\nspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': 
ypred_pose})\ntmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\ntmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\nprint('Acc: {0} \\t Err: {1}'.format(tmp_acc, tmp_err))\n\nfor epoch in range(args.num_epochs):\n\ttic = time.time()\n\tscheduler.step()\n\t# training step\n\ttraining()\n\t# save model at end of epoch\n\tsave_checkpoint(model_file)\n\t# validation\n\tytrue_cat, ytrue_pose, ypred_cat, ypred_pose = testing()\n\tspio.savemat(results_file, {'ytrue_cat': ytrue_cat, 'ytrue_pose': ytrue_pose, 'ypred_cat': ypred_cat, 'ypred_pose': ypred_pose})\n\ttmp_acc = get_accuracy(ytrue_cat, ypred_cat, num_classes)\n\ttmp_err = get_error2(ytrue_pose, ypred_pose, ytrue_cat, num_classes)\n\tprint('Acc: {0} \\t Err: {1}'.format(tmp_acc, tmp_err))\n\twriter.add_scalar('val_acc', tmp_acc, count)\n\twriter.add_scalar('val_err', tmp_err, count)\n\tval_acc.append(tmp_acc)\n\tval_err.append(tmp_err)\n\t# time and output\n\ttoc = time.time() - tic\n\tprint('Epoch: {0} done in time {1}s'.format(epoch, toc))\n\t# cleanup\n\tgc.collect()\nwriter.close()\nval_acc = np.stack(val_acc)\nval_err = np.stack(val_err)\nspio.savemat(plots_file, {'val_acc': val_acc, 'val_err': val_err})\n", "sub_path": "learnJointCatPoseModel3_weighted.py", "file_name": "learnJointCatPoseModel3_weighted.py", "file_ext": "py", "file_size_in_byte": 8671, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 27, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 40, "usage_type": "call"}, {"api_name": "os.path", "line_number": 40, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 41, "usage_type": "call"}, {"api_name": "os.path", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 42, "usage_type": "call"}, {"api_name": "os.path", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 43, "usage_type": "call"}, {"api_name": "os.path", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "helperFunctions.classes", "line_number": 49, "usage_type": "argument"}, {"api_name": "helperFunctions.classes", "line_number": 54, "usage_type": "argument"}, {"api_name": "os.path.join", "line_number": 55, "usage_type": "call"}, {"api_name": "os.path", "line_number": 55, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 57, "usage_type": "call"}, {"api_name": "os.path", "line_number": 57, "usage_type": "attribute"}, {"api_name": "torch.nn.CrossEntropyLoss", "line_number": 60, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 60, "usage_type": "name"}, {"api_name": "axisAngle.geodesic_loss", "line_number": 61, "usage_type": "call"}, {"api_name": "dataGenerators.ImagesAll", "line_number": 65, "usage_type": "call"}, {"api_name": "dataGenerators.ImagesAll", "line_number": 66, "usage_type": "call"}, {"api_name": "dataGenerators.TestImages", "line_number": 67, "usage_type": "call"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 69, "usage_type": "call"}, {"api_name": "dataGenerators.my_collate", "line_number": 69, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 70, "usage_type": "call"}, {"api_name": 
"dataGenerators.my_collate", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 71, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 77, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 77, "usage_type": "name"}, {"api_name": "featureModels.resnet_model", "line_number": 82, "usage_type": "call"}, {"api_name": "torch.nn.ModuleList", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 83, "usage_type": "name"}, {"api_name": "poseModels.model_3layer", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.stack", "line_number": 87, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 88, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 89, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 90, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 91, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.tanh", "line_number": 91, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 91, "usage_type": "name"}, {"api_name": "torch.nn.Module", "line_number": 96, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 96, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 104, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 104, "usage_type": "name"}, {"api_name": "torch.unsqueeze", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional.softmax", "line_number": 109, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 109, "usage_type": "name"}, {"api_name": "torch.stack", "line_number": 110, "usage_type": "call"}, {"api_name": "torch.squeeze", "line_number": 111, "usage_type": "call"}, {"api_name": "torch.bmm", "line_number": 111, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 112, "usage_type": "attribute"}, {"api_name": "torch.nn.functional.tanh", "line_number": 112, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 112, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 118, "usage_type": "call"}, {"api_name": "torch.optim.Adam", "line_number": 126, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 126, "usage_type": "name"}, {"api_name": "torch.optim.lr_scheduler.LambdaLR", "line_number": 127, "usage_type": "call"}, {"api_name": "torch.optim.lr_scheduler", "line_number": 127, "usage_type": "attribute"}, {"api_name": "torch.optim", "line_number": 127, "usage_type": "name"}, {"api_name": "tensorboardX.SummaryWriter", "line_number": 128, "usage_type": "call"}, {"api_name": "progressbar.ProgressBar", "line_number": 137, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 141, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 142, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 143, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 156, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 168, "usage_type": "call"}, {"api_name": 
"scipy.io", "line_number": 168, "usage_type": "name"}, {"api_name": "helperFunctions.get_accuracy", "line_number": 169, "usage_type": "call"}, {"api_name": "axisAngle.get_error2", "line_number": 170, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 189, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 193, "usage_type": "call"}, {"api_name": "torch.autograd.Variable", "line_number": 195, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 200, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 202, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 203, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 204, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 210, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 214, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 214, "usage_type": "name"}, {"api_name": "helperFunctions.get_accuracy", "line_number": 215, "usage_type": "call"}, {"api_name": "axisAngle.get_error2", "line_number": 216, "usage_type": "call"}, {"api_name": "time.time", "line_number": 220, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 228, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 228, "usage_type": "name"}, {"api_name": "helperFunctions.get_accuracy", "line_number": 229, "usage_type": "call"}, {"api_name": "axisAngle.get_error2", "line_number": 230, "usage_type": "call"}, {"api_name": "time.time", "line_number": 237, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 240, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.stack", "line_number": 243, "usage_type": "call"}, {"api_name": "scipy.io.savemat", "line_number": 244, "usage_type": "call"}, {"api_name": "scipy.io", "line_number": 244, "usage_type": "name"}]} +{"seq_id": "235427644", "text": "import matplotlib.pyplot as plt\r\n\r\n# example of data: scaffolds.faa\r\n\r\npath = input('Input path to file in format fasta')\r\n\r\nlen_list = []\r\nlen_ = 0\r\nwith open(path) as f:\r\n for line in f:\r\n if '>' in line:\r\n if len_ != 0:\r\n len_list.append(len_)\r\n len_ = 0\r\n elif '>' not in line:\r\n len_ += len(line.rstrip())\r\nlen_list.append(len_)\r\n\r\nnum_bins = max(len_list)\r\nn, bins, patches = plt.hist(len_list, num_bins, facecolor='green', alpha=0.5)\r\nplt.xlabel('Length')\r\nplt.ylabel('Count')\r\nplt.title('Distribution of sequences length')\r\n\r\nplt.show()\r\n", "sub_path": "drawing/plt_fasta_plot.py", "file_name": "plt_fasta_plot.py", "file_ext": "py", "file_size_in_byte": 615, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "matplotlib.pyplot.hist", "line_number": 20, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 20, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 22, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 22, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 23, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 23, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.show", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 25, "usage_type": "name"}]} +{"seq_id": "73125423", "text": "\"\"\"\ntrajectory.py: Provides LatticeMDTrajectory class extending MDTraj \n Trajectory to provide custom analysis routines\n\nAuthor: Jack Greisman \n\"\"\"\n__author__ = \"Jack Greisman\"\n__version__ = \"1.0\"\n\nimport mdtraj\nimport numpy as np\n\nclass LatticeMDTrajectory(mdtraj.Trajectory):\n \"\"\"\n LatticeMDTrajectory provides methods for the analysis of MD \n simulations of crystal lattices.\n \"\"\"\n def __init__(self, filename):\n\n traj = mdtraj.load(filename)\n super().__init__(traj.xyz, traj.topology, traj.time,\n traj.unitcell_lengths, traj.unitcell_angles)\n\n # Validate unitcell information\n self._validateUnitcell()\n\n def _validateUnitcell(self):\n \"\"\"\n Validate unitcell information is provided and do sanity checks\n \"\"\"\n\n if not self._have_unitcell:\n raise AttributeError('Unitcell information is not provided')\n self._check_valid_unitcell()\n return\n \n def smartWrapMolecule(self, indices):\n \"\"\"\n This function applies periodic wrapping to a given set of atomic\n indices to prevent their center of mass from jumping by a unit\n cell length. Currently, it is assumed that the indices \n correspond to a molecule -- meaning a set of atoms connected by\n bonds.\n\n Parameters\n ----------\n indices : list of ints\n Atomic indices of positions that should be wrapped together\n \"\"\"\n\n # Compute geometric center of coordinates\n coms = self.xyz[:, indices, :].mean(axis=1)\n \n # Compute mask for integer unitcell adjustments\n mask = np.zeros(shape=(self.n_frames, 3))\n \n # X-axis\n x = self.unitcell_lengths[0, 0]\n mask[np.where(coms[:, 0] - coms[0, 0] < -1*x/2)[0], 0] = 1\n mask[np.where(coms[:, 0] - coms[0, 0] > x/2)[0], 0] = -1\n \n # Y-axis\n y = self.unitcell_lengths[0, 1]\n mask[np.where(coms[:, 1] - coms[0, 1] < -1*y/2)[0], 1] = 1\n mask[np.where(coms[:, 1] - coms[0, 1] > y/2)[0], 1] = -1\n \n # Z-axis\n z = self.unitcell_lengths[0, 2]\n mask[np.where(coms[:, 2] - coms[0, 2] < -1*z/2)[0], 2] = 1\n mask[np.where(coms[:, 2] - coms[0, 2] > z/2)[0], 2] = -1\n \n # Update trajectory coordinates\n self.xyz[:, indices, :] += (mask*self.unitcell_lengths).reshape(-1, 1, 3) \n \n return \n\n def smartWrapProtein(self):\n \"\"\"\n Apply smart wrapping independently to each protein molecule in\n the MD system. 
For now, this method identifies proteins as \n molecules with more than 100 atoms\n \"\"\"\n\n for mol in self.topology.find_molecules():\n if len(mol) > 100:\n indices = [ atom.index for atom in mol ]\n self.smartWrapMolecule(indices)\n \n", "sub_path": "mdtools/analysis/latticemdtrajectory.py", "file_name": "latticemdtrajectory.py", "file_ext": "py", "file_size_in_byte": 2907, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "mdtraj.Trajectory", "line_number": 13, "usage_type": "attribute"}, {"api_name": "mdtraj.load", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 60, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 64, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 69, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 70, "usage_type": "call"}]} +{"seq_id": "12966585", "text": "'''\nCreated on Jul 15, 2018\n\n@author: yung_messiah\n'''\nimport asyncio\nimport bs4\nimport datetime\nimport discord\nimport json\nimport logging\nimport os\nimport pymongo\nimport pytz\nimport random\nimport re\nimport requests\nimport string\nimport time\n\n\nfrom decimal import Decimal\nfrom discord.ext.commands import Bot\nfrom discord.utils import get\nfrom discord.errors import LoginFailure, HTTPException\nfrom discord.embeds import Embed \nfrom threading import Thread\nfrom twilio.rest import Client\n\n# Discord command triggers\nBOT_PREFIX = (\"?\", \"!\")\n# General Discord Bot Description\nBOT_DESCRIPTION = '''**FOMO Helper** is a general service bot for all your consumer needs.\n\nThere are a couple of utility commands which are showcased here, and should serve you well.\n\nTo use all commands, precede the keyword by an exclamation mark (!) 
or a question mark (?).\n\nExample:\n !gmail example@gmail.com\n OR\n ?gmail example@gmail.com\n\n'''\n\n# Token for Discord Bot \nTOKEN = os.environ[\"FOMO_HELPER_BOT_TOKEN\"]\n# Variables to make calls to Shopify (Subscription related data)\nSHOPIFY_USER = os.environ[\"FOMO_HELPER_SHOPIFY_USER\"]\nSHOPIFY_PASS = os.environ[\"FOMO_HELPER_SHOPIFY_PASS\"]\n# URI for Mongo/Heroku Database\nMONGODB_URI = os.environ[\"FOMO_HELPER_MONGODB_URI\"]\n\nPAYPAL_CLIENT_ID = os.environ[\"FOMO_HELPER_PAYPAL_CLIENT_ID\"]\nPAYPAL_CLIENT_SECRET = os.environ[\"FOMO_HELPER_PAYPAL_CLIENT_SECRET\"]\n\n# Create Discord Bot instance with the given command triggers\nclient = Bot(command_prefix=BOT_PREFIX)#, description=BOT_DESCRIPTION#)\n# Remove the default Discord help command\nclient.remove_command('help')\n# Reference to Mongo/Heroku database\ndb = None\n# Reference to subscriptions collection\nsubscriptions = None\n# Reference to paypal class object\npaypal = None\n\n# Logger for tracking errors.\nlogger = logging.getLogger('discord')\nlogger.setLevel(logging.ERROR)\nhandler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')\nhandler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))\nlogger.addHandler(handler)\n\n# Header to make the requests\nheaders = {'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36'}\n\n# ------------------------------------------------------------- #\n# #\n# Method to make use of TinyURL #\n# # \n# ------------------------------------------------------------- #\n''' Makes use of TinyURL to shorten the URL for the Shopify items\n \n @param url: URL to be shortened\n @param ctx: Discord information\n @return: The shortened url '''\nasync def tiny(url, ctx):\n URL = \"https://tinyurl.com/create.php?source=indexpage&url=\" + url + \"&submit=Make+TinyURL%21&alias=\"\n raw_HTML = requests.get(URL, headers=headers, timeout=10)\n \n if raw_HTML.status_code != 200:\n # send_message is a coroutine and must be awaited, so tiny is declared async\n await client.send_message(ctx.message.channel, \"An error has occurred completing your request\")\n return None\n else:\n page = bs4.BeautifulSoup(raw_HTML.text, 'lxml')\n return page.find_all('div', {'class': 'indent'})[1].b.string\n \n \n''' Subscribes user to service by adding them to the database and assigning the appropriate role(s).\n\n @param email: The email to be added to the database\n @param author: User responsible for sending authentication message '''\nasync def sub_and_assign_roles(email, author, free, member, monitors):\n # Search for email in database\n data = subscriptions.find_one({\"email\": f\"{email}\"})\n # Reference to the FOMO discord server\n discord_server = client.get_server(\"355178719809372173\")\n # If the email doesn't exist in the database\n if data == None:\n # Insert new user data in the database\n subscriptions.insert({\n \"email\": email,\n \"status\": \"active\",\n \"discord_id\": author.id,\n \"free\": free,\n \"member\": member,\n \"monitors\": monitors\n })\n \n # Assign correct role to user\n if member:\n role = get(discord_server.roles, name=\"Premium\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n elif monitors:\n role = get(discord_server.roles, name=\"Monitor\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n elif free:\n role = get(discord_server.roles, name=\"Free\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n \n # Send message on Discord\n 
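# (the message is delivered as a DM, since the destination is the user rather than a channel)\n 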
await client.send_message(author, \"Your subscription has been successfully activated!\")\n return True\n else:\n # If an entry exists in the database for the email\n status = data['status']\n # Determine how to proceed based on current status of subscription\n if status == \"active\":\n await client.send_message(author, \"You have already activated your subscription. If you believe this to be a mistake, please contact an admin.\")\n return False\n else:\n # If the subscription was canceled and the user wants to reactivate it, they need an admin's approval\n # see the !resub command\n if status == \"canceled\":\n await client.send_message(author, \"Your subscription was previously canceled by one of our admins. Please contact one of them to reactivate it.\")\n return False\n else:\n member = str(data[\"member\"])\n monitors = str(data[\"monitors\"])\n free = str(data[\"free\"])\n \n if member == \"True\":\n subscriptions.replace_one({\n \"email\": email\n }, {\n \"email\": email,\n \"status\": \"active\",\n \"discord_id\": author.id,\n \"member\": \"True\",\n \"free\": \"False\",\n \"monitors\": \"False\"\n })\n \n role = get(discord_server.roles, name=\"Premium\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n elif monitors == \"True\":\n subscriptions.replace_one({\n \"email\": email\n }, {\n \"email\": email,\n \"status\": \"active\",\n \"discord_id\": author.id,\n \"member\": \"False\",\n \"free\": \"False\",\n \"monitors\": \"True\"\n })\n \n role = get(discord_server.roles, name=\"Monitor\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n else:\n subscriptions.replace_one({\n \"email\": email\n }, {\n \"email\": email,\n \"status\": \"active\",\n \"discord_id\": author.id,\n \"member\": \"False\",\n \"free\": \"True\",\n \"monitors\": \"False\"\n })\n \n role = get(discord_server.roles, name=\"Free\")\n user = discord_server.get_member(author.id)\n await client.add_roles(user, role)\n \n \n await client.send_message(author, \"Your subscription has been reactivated!\")\n return True\n# ------------------------------------------------------------- #\n# #\n# All the Discord Bot methods #\n# # \n# ------------------------------------------------------------- #\n''' Method triggered by server event when a member leaves the Discord group \n\n @param member: User leaving the server. 
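If they had a subscription that was not already canceled, its status is set to disabled so it can be reactivated if they return. 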
'''\n@client.event\nasync def on_member_remove(member):\n # Search for user data in database\n data = subscriptions.find_one({\"discord_id\": f\"{member.id}\"})\n # Take no actions if no data found in database\n if data == None:\n pass\n else:\n # Switch user's subscription status\n status = data[\"status\"]\n \n if status == \"canceled\":\n pass\n else:\n for role in member.roles:\n if any(name in role.name for name in (\"Premium\", \"Monitor\", \"Free\")):\n result = subscriptions.update_one({\n \"discord_id\": member.id\n }, {\n \"$set\": {\n \"status\": \"disabled\"\n }\n }, upsert=False)\n \n''' Method triggered by server event when a member sends a message in the Discord group\n \n @param message: Message sent by the user in the server '''\n@client.event\nasync def on_message(message):\n # Don't want the bot to reply to itself\n if message.author == client.user:\n return \n \n # Make sure the message sent is not a command\n if not message.content.startswith('!') and not message.content.startswith('?'):\n # Automate responses by displaying specific output based on user message if necessary\n if re.search('nike element react|element react|react 87|react|nike element', message.content, re.IGNORECASE):\n if re.search('sitelist', message.content, re.IGNORECASE):\n await client.send_message(message.channel, 'Nike Element React sitelist URL: ')\n elif re.search('keyword|kw|kws|keywords', message.content, re.IGNORECASE):\n await client.send_message(message.channel, 'Nike Element React keywords: +react, +element, +87')\n elif re.search('raffle|raffles', message.content, re.IGNORECASE):\n await client.send_message(message.channel, 'Updated list in <#471089859034087434>, don\\'t forget to enter! Open raffles can also be found on ')\n elif re.search('pharrell afro pack|pharrell afro|afro pack|pharrell afro hu|afro hu|pharrell hu', message.content, re.IGNORECASE):\n if re.search('raffle|raffles', message.content, re.IGNORECASE):\n await client.send_message(message.channel, 'Updated list in <#471089859034087434>, don\\'t forget to enter! 
Open raffles can also be found on ') \n elif re.search('slots', message.content, re.IGNORECASE):\n if re.search('guide|how\\s+do|work|what\\s+are|how\\s+to|sign\\s+up|submit', message.content, re.IGNORECASE):\n await client.send_message(message.channel, 'You can find a detailed explanation on how slots work in <#471003962854604810> or in the FOMO Guide: ')\n elif re.search('fomo', message.content, re.IGNORECASE):\n if re.search('guide|how\\s+to|works|work|tutorial', message.content, re.IGNORECASE):\n await client.send_message(message.channel, \"FOMO Guide: \")\n else:\n # If it's a command that was sent, process the command normally\n await client.process_commands(message)\n\n''' Admin-only function to display all expired premium accounts \n\n @param ctx: Discord information '''\n@client.command(name='expired',\n description='See all the premium members whose PayPal subscription has expired',\n pass_context=True)\nasync def expired_subs(ctx):\n # User using the command\n author = ctx.message.author\n # FOMO Discord server reference\n discord_server = client.get_server(\"355178719809372173\")\n # Author and Member are considered different things on Discord\n member = discord_server.get_member(author.id)\n # If admin role exists for the member, run correct process\n if \"Admin\" in [role.name for role in member.roles]:\n await paypal.paypal_observer(author)\n else:\n await client.send_message(author, \"This command is for Admins only\")\n\n''' Function for personal use; check if any other Discord server got access to FOMO Helper,\n and prevent them from freely using our bot \n \n @param ctx: Discord information '''\n@client.command(name='connectedservers',\n description='Displays a list of servers the bot is connected to.',\n pass_context=True)\nasync def servers_list(ctx):\n author = ctx.message.author\n servers = client.servers\n message = \"The connected servers are:\\n\"\n for server in servers:\n message += f\"\\t- {server.name}: {server.id}\\n\"\n \n await client.send_message(author, message)\n\n''' Complement to connectedservers command. Removes FOMO Helper from any unauthorized servers\n using the bot. \n \n @param ctx: Discord information\n @param *args: Developer email and unauthorized server id to remove bot service from '''\n@client.command(name='unauthorizeserver',\n description='Removes bot from any unauthorized servers.',\n pass_context=True)\nasync def remove_from_server(ctx, *args):\n author = ctx.message.author\n \n if len(args) < 2:\n await client.send_message(author, \"Command is missing an argument\")\n elif len(args) > 2: \n await client.send_message(author, \"Command has extra argument(s).\")\n else:\n email = args[0]\n id = args[1]\n \n if email == \"macewandu@hotmail.com\":\n server_to_leave = client.get_server(str(id))\n await client.leave_server(server_to_leave)\n await client.send_message(author, \"Successfully left the server\")\n else:\n await client.send_message(author, \"Invalid argument passed\")\n\n''' Discord event, triggered upon successful login '''\n@client.event\nasync def on_ready():\n print('Logged in as')\n print(client.user.name)\n print(client.user.id)\n print('------')\n \n''' Command used by admins to grant a user permission to resubscribe to the Discord group\n if their subscription was previously cancelled (NOTE - a cancelled subscription is different\n from a disabled subscription.) 
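A cancelled subscription stays blocked until an admin runs this command, which sets it back to disabled so the member can then reactivate it themselves. 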
\n \n @param ctx: Discord information \n @param *args: Email and subscription type passed by user '''\n@client.command(name='resub',\n description='Gives a member back their subscription if it was previously canceled',\n pass_context=True)\nasync def resub(ctx, *args):\n # Message author\n author = ctx.message.author \n # FOMO Discord server reference\n discord_server = client.get_server(\"355178719809372173\")\n # Member reference for user \n member = discord_server.get_member(author.id)\n \n # Make sure message is a private message to FOMO Helper\n if isinstance(ctx.message.channel, discord.PrivateChannel):\n # Make sure an admin is using the command\n if \"Admin\" in [role.name for role in member.roles]:\n # Check for correct number of parameters passed\n if len(args) < 2:\n await client.send_message(author, \"Command is missing an argument. Make sure you provide the shopify email and the role to be given\")\n elif len(args) > 2:\n await client.send_message(author, \"Command has extra argument(s). Make sure you provide the shopify email and the role to be given only.\")\n else:\n # Email passed as a parameter\n email = args[0]\n # Subscription type passed as a parameter \n sub = args[1]\n \n # Find user information on database if it exists\n data = subscriptions.find_one({\"email\": f\"{email}\"})\n if data == None:\n await client.send_message(author, \"Could not find the provided email. Please check that it is correct and try again.\")\n else:\n if sub.lower() == \"member\" or sub.lower() == \"free\" or sub.lower() == \"monitors\":\n if sub.lower() == \"member\":\n subscriptions.update_one({\n \"email\": email\n }, {\n \"$set\": {\n \"status\": \"disabled\",\n \"member\": \"True\"\n }\n }, upsert=False)\n elif sub.lower() == \"free\":\n subscriptions.update_one({\n \"email\": email\n }, {\n \"$set\": {\n \"status\": \"disabled\",\n \"free\": \"True\"\n }\n }, upsert=False)\n else:\n subscriptions.update_one({\n \"email\": email\n }, {\n \"$set\": {\n \"status\": \"disabled\",\n \"monitors\": \"True\"\n }\n }, upsert=False)\n \n await client.send_message(author, \"User has been given permission to reactivate their account!\")\n else:\n await client.send_message(author, \"Provided subscription isn't valid. Please make sure it is either **member**, **free** or **monitors**\")\n \n else:\n await client.send_message(author, \"This command is for admins only\")\n \n\n''' Method for admin use only. Cancels a user's subscription and updates the database \n\n @param ctx: Discord information\n @param email: Email associated with the account to cancel the subscription for'''\n@client.command(name='cancel',\n description='Cancel a user\\'s subscription',\n pass_context=True)\nasync def cancel(ctx, email):\n # FOMO Discord server reference\n discord_server = client.get_server(\"355178719809372173\")\n # Message author\n author = ctx.message.author \n # Discord member reference based on user id\n member = discord_server.get_member(author.id)\n \n # If message is a private message \n if isinstance(ctx.message.channel, discord.PrivateChannel):\n # Check if member is an admin\n if \"Admin\" in [role.name for role in member.roles]:\n data = subscriptions.find_one({\"email\": f\"{email}\"})\n if data == None:\n await client.send_message(author, \"Could not find the provided email. 
Please check that it is correct and try again.\")\n else:\n subscriptions.update_one({\n \"email\": email\n }, {\n \"$set\": {\n \"status\": \"canceled\",\n \"member\": \"False\",\n \"free\": \"False\",\n \"monitors\": \"False\"\n }\n })\n \n user_id = data[\"discord_id\"]\n user = discord_server.get_member(user_id)\n monitor_role = get(discord_server.roles, name='Monitor')\n member_role = get(discord_server.roles, name='Premium')\n free_role = get(discord_server.roles, name=\"Free\")\n await client.remove_roles(user, monitor_role)\n await client.remove_roles(user, member_role)\n await client.remove_roles(user, free_role)\n await client.send_message(author, \"User subscription successfully canceled\")\n else:\n await client.send_message(author, \"This command is for admins only\") \n\n\n''' Command responsible for authenticating a users free subscription on Discord and assigning\n appropriate role \n \n @param ctx: Discord information\n @param email: email associated to account to activate subscription for '''\n@client.command(name='free',\n description='Activate your free subscription to be assigned the appropriate roles',\n pass_context=True)\nasync def activate_free(ctx, email):\n # Discord message author\n author = ctx.message.author\n # FOMO Discord server reference\n discord_server = client.get_server(\"355178719809372173\")\n \n # If message is a private message\n if isinstance(ctx.message.channel, discord.PrivateChannel):\n try:\n # Make a request to shopify api to conduct search for the passed email\n customers_req = requests.get(f'https://{SHOPIFY_USER}:{SHOPIFY_PASS}@fomosuptest.myshopify.com/admin/customers/search.json?query=email:{email}', timeout=10)\n \n if customers_req.status_code != 200:\n await client.send_message(author, \"An error has occurred completing your request\")\n return\n else:\n customers_resp = customers_req.json()\n# print(customers_resp)\n valid_email = len(customers_resp['customers'])\n# print(\"VALID EMAIL: \" + str(valid_email))\n if valid_email == 0:\n await client.send_message(author, \"This email is invalid. Make sure you use the email you used to create an account on our FOMO website.\")\n return\n else:\n customer = customers_resp['customers'][0]\n if customer.get(\"orders_count\") <= 0:\n pass\n else:\n customer_id = str(customer.get(\"id\"))\n customer_last_order_id = str(customer.get(\"last_order_id\"))\n \n last_order_req = requests.get(f'https://{SHOPIFY_USER}:{SHOPIFY_PASS}@fomosuptest.myshopify.com/admin/orders/{customer_last_order_id}.json', timeout=10)\n if last_order_req.status_code != 200:\n await client.send_message(author, \"An error has occurred\")\n return\n else:\n order_resp = last_order_req.json()\n# print(order_resp)\n is_free = re.search(\"line_items':.*title':\\s'(discord beta limited access \\(free\\).*)',\\s'quantity\", str(order_resp).lower())\n if is_free == None:\n orders_req = requests.get(f'https://{SHOPIFY_USER}:{SHOPIFY_PASS}@fomosuptest.myshopify.com/admin/customers/{customer_id}/orders.json', timeout=10)\n if orders_req.status_code != 200:\n await client.send_message(author, \"An error has occurred\")\n return\n else:\n orders_resp = orders_req.json()\n is_free = re.search(\"line_items':.*title':\\s'(discord beta limited access \\(free\\).*)',\\s'quantity\", str(orders_resp).lower())\n if is_free == None:\n await client.send_message(author, \"You do not have a subscription. 
If you believe this to be a mistake, please contact an admin.\")\n return\n else:\n await sub_and_assign_roles(email, author, True, False, False)\n return\n else:\n await sub_and_assign_roles(email, author, True, False, False)\n return \n except requests.Timeout as error:\n print(\"There was a timeout error\")\n print(str(error))\n except requests.ConnectionError as error:\n print(\"A connection error has occurred. The details are below.\\n\")\n print(str(error))\n except requests.RequestException as error:\n print(\"An error occurred making the internet request.\")\n print(str(error))\n\n''' Command responsible for authenticating users premium subscription on Discord and \n assigning correct role '''\n@client.command(name='premium',\n description='Activate your premium subscription to be assigned the appropriate roles',\n pass_context=True)\nasync def activate_premium(ctx, email):\n # Discord message author \n author = ctx.message.author\n # FOMO Discord server reference \n discord_server = client.get_server(\"355178719809372173\")\n \n # Check if message is a private message\n if isinstance(ctx.message.channel, discord.PrivateChannel):\n try:\n await paypal.check_membership(ctx, email)\n\n except requests.Timeout as error:\n print(\"There was a timeout error\")\n print(str(error))\n except requests.ConnectionError as error:\n print(\"A connection error has occurred. The details are below.\\n\")\n print(str(error))\n except requests.RequestException as error:\n print(\"An error occurred making the internet request.\")\n print(str(error))\n \n''' Discord custom help command, formatted differently from the default help command\n\n @param ctx: Discord information\n @param *command: List of arguments passed with the command ''' \n@client.command(name='help',\n description='Help message to guide the user through using the bot.',\n pass_context=True)\nasync def custom_help(ctx, *command):\n author = ctx.message.author\n \n if len(command) == 0:\n embed = Embed(\n color = 0xffffff,\n description = BOT_DESCRIPTION\n )\n \n keywords = '**!address** \\n**!gmail** \\n**!atc** \\n**!isshopify** \\n**!fee** \\n**!free** \\n**!premium**'\n keyword_descriptions = 'Jig your home address; type input between **\" \"**\\n'\n keyword_descriptions += 'Jig your gmail address\\n'\n keyword_descriptions += 'Generate ATC for a shopify URL\\n'\n keyword_descriptions += 'Checks if a website is Shopify\\n'\n keyword_descriptions += 'Calculates seller profit after fees for a given sale price\\n'\n keyword_descriptions += 'Authenticates Free Members and assigns correct role\\n'\n keyword_descriptions += 'Authenticates Premium Members and assigns correct role\\n'\n \n embed.add_field(name='Keywords:', value=keywords, inline=True)\n embed.add_field(name='Brief:', value=keyword_descriptions, inline=True)\n embed.add_field(name='More Info', value=\"For more information on a keyword, type **!help keyword**\", inline=False)\n await client.send_message(author, embed=embed) \n elif (len(command) > 0 and (command[0] == 'gmail' or command[0] == 'mail' or command[0] == 'email')):\n desc = 'This command manipulates any gmail address passed to it as a parameter.'\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ gmail | mail | email ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'address' or command[0] == 'adr' or command[0] == 'addr')):\n desc = 'This command manipulates any residential address passed to it as a 
parameter.'\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ address | addr | adr ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'atc')):\n desc = 'Add To Cart command for any Shopify website. Generates a link leading the user '\n desc += 'straight to the payment page. Takes in the item\\'s URL as a parameter'\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ atc ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'isshopify')):\n desc = 'This command uses a given URL in order to determine whether '\n desc += 'a website is a shopify site or not.'\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ isshopify ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'fee')):\n desc = \"Calculates the seller fees applied by different websites.\"\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n \n embed.add_field(name='Aliases', value='[ fee ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'free')):\n desc = \"Authenticates new Free Members of the group in the database \"\n desc += \"and assigns correct role(s) so they can have access to all the correct content. \"\n desc += \"The parameter for this command is the email used on the FOMO website.\"\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ free ]', inline=False)\n await client.send_message(author, embed=embed)\n elif (len(command) > 0 and (command[0] == 'premium')):\n desc = \"Authenticates new Premium Members of the group in the database \"\n desc += \"and assigns correct role(s) so they can have access to all the correct content. 
\"\n desc += \"The parameter for this command is the email used to make your PayPal payments.\"\n embed = Embed(\n color = 0xffffff,\n description = desc\n )\n embed.add_field(name='Aliases', value='[ premium ]', inline=False)\n await client.send_message(author, embed=embed)\n \n \n''' Discord command to calculate the fees that are applied to sale products on multiple websites.\n\n @param ctx: Discord information\n @param sale_price: Price for which to make the calculations'''\n@client.command(name='fee',\n description='Calculates the seller fees applied by different websites',\n pass_context=True)\nasync def fee_calculator(ctx, sale_price):\n # List of websites \n sites = []\n # Discord channel on which command was called\n channel = ctx.message.channel\n \n # Simple check for a monetary value\n if re.match('^\\d+(\\.\\d*)*$', sale_price) == None:\n await client.send_message(channel, 'The value given is not a proper monetary value')\n else:\n price = Decimal(sale_price)\n price = round(price, 2)\n \n # Tuple format\n # - site title\n # - fee percentage\n # - fixed fee (0 if none)\n ebay = ('eBay', 0.129, 0.00)\n grailed = ('Grailed', 0.089, 0.30)\n paypal = ('PayPal', 0.029, 0.30)\n goat = ('Goat', 0.095, 5.00)\n stockx = ('StockX', 0.120, 0.00)\n shopify = ('Basic Shopify', 0.029, 0.30)\n sites.append(ebay)\n sites.append(grailed)\n sites.append(paypal)\n sites.append(goat)\n sites.append(stockx)\n sites.append(shopify)\n \n websites = ''\n fees = ''\n profits = ''\n # For each site, format information for display on Discord\n for i in sites:\n websites += i[0] + '\\n'\n fee = round(price * Decimal(i[1]), 2)\n fees += '$' + str(round(fee + Decimal(i[2]), 2)) + '\\n'\n after_fee = round(price - fee - Decimal(i[2]), 2)\n profits += '$' + str(after_fee) + '\\n'\n \n embed = Embed(color = 0x008f00)\n embed.add_field(name='Website', value=websites, inline=True)\n embed.add_field(name='Fee', value=fees, inline=True)\n embed.add_field(name='Profit After Fees', value=profits, inline=True)\n \n await client.send_message(channel, embed=embed)\n\n\n''' Discord command to check if a specific website is a Shopify website\n \n @param ctx: Discord information\n @param url: URL to be checked ''' \n@client.command(name='isshopify',\n description='This command uses a given URL in order to determine whether a website is a shopify site or not.',\n pass_context=True)\nasync def shopify_check(ctx, url):\n shopify = Shopify()\n await shopify.check_if_shopify(ctx, url)\n\n''' Discord command to Jig a specific gmail address.\n\n @param ctx: Discord information\n @param email: Email to be jigged '''\n@client.command(name='gmail',\n description='This command manipulates any gmail address passed to it as a parameter.',\n aliases=['mail', 'email'],\n pass_context=True)\nasync def gmail_jig(ctx, email):\n gmail = GmailJig()\n await gmail.run(str(email), ctx)\n \n\n''' Discord command to Jig a specific residential address.\n\n @param ctx: Discord information\n @param adr: Residential address to be jigged ''' \n@client.command(name='address',\n description='This command manipulates any residential address passed to it as a parameter.',\n aliases=['addr', 'adr'],\n pass_context=True)\nasync def address_jig(ctx, adr):\n address = AddressJig()\n await address.generate_address_two(str(adr), ctx)\n\n''' Discord command to generate Add to Cart links for Shopify Websites.\n\n @param ctx: Discord information\n @param url: URL for item to be purchased ''' \n@client.command(name='atc',\n description='Add To Cart command for any 
Shopify website. Generates a link leading the user ' +\n 'straight to the payment page. Takes in the item\\'s URL as a parameter',\n pass_context=True)\nasync def add_to_cart(ctx, url):\n shopify = Shopify()\n await client.send_message(ctx.message.channel, ':hourglass: Retrieving sizes. Please wait...')\n await shopify.run(str(url), ctx)\n\n''' Discord command for eBay views: limited to 20 views per command \n\n @param url: URL for eBay listing '''\n@client.command(name='ebayview', \n description='Automatic eBay viewer for any listing. Views the given URL 20 times',\n pass_context=True)\nasync def ebay_view(ctx, url):\n await client.send_message(ctx.message.channel, ':hourglass: Starting bot. Please wait...')\n proxy_list = open('proxies.txt', 'r')\n proxysplit = proxy_list.read().splitlines()\n i = 0\n product_url = url\n while i < 20:\n session = requests.Session()\n headers = {\n 'Host': 'www.ebay.com',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',\n 'DNT': '1',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'\n }\n proxypuller = random.choice(proxysplit)\n proxy = {\n 'https' : proxypuller\n }\n try:\n print('view #' + str(i) + ' with ' + str(proxy))\n a = session.get(str(product_url), headers=headers, proxies=proxy)\n except Exception:\n print('error')\n # count this attempt whether the request succeeded or failed\n i = i+1\n await client.send_message(ctx.message.channel, 'Link ' + str(product_url) + ' viewed 20 times')\n \n# ------------------------------------------------------------- #\n# #\n# Individual classes to represent the different functionalities #\n# that the Discord bot will have #\n# # \n# ------------------------------------------------------------- #\nclass GmailJig(object):\n emails = ''\n \n ''' Kickstarts the Gmail Jigging Process.\n \n @param email: Email to be jigged\n @param ctx: Discord information '''\n async def run(self, email, ctx):\n if email.replace(' ', '') == \"\":\n await client.send_message(ctx.message.author,\"Empty input given. Please try again\")\n else:\n await self.email_check(email, ctx)\n \n ''' Checks for a correct email (google email address only).\n \n @param email: Email to be checked\n @param ctx: Discord information '''\n async def email_check(self, email, ctx):\n # Make sure gmail address was passed\n verified = re.search('(@gmail.+)', email)\n if verified == None:\n await client.send_message(ctx.message.author,\"Invalid email address. Please use a @gmail address\")\n else:\n # Store email provider/second part of email address -> @gmail...\n email_suffix = verified.group()\n # Store first part of email\n prefix = email.replace(email_suffix, '')\n # Make sure first part of email is of a reasonable length for jigging\n if len(prefix) > 2:\n await self.jig_email(prefix, email_suffix, ctx)\n else:\n await client.send_message(ctx.message.author,\"Your email is not long enough. 
Please try another email\")\n \n ''' Jigs a given gmail address.\n \n @param email_prefix: Everything before the @ sign\n @param email_suffix: Everything after the @ sign, @ sign included\n @param ctx: Discord information ''' \n async def jig_email(self, email_prefix, email_suffix, ctx):\n # Keeps track of indices where period was applied\n used_indeces = []\n # Keeps track of indices neighboring an existing period + periods location\n email_dot_indeces = []\n # length of email prefix\n last_index = len(email_prefix) - 1\n limit = 6 \n stop = 0\n \n # Go through the prefix\n for index, character in enumerate(email_prefix):\n # If there is a dot anywhere in the prefix already\n if character == '.':\n # Keep track of its location and adjacent indexes\n email_dot_indeces.append(index)\n if index-1 not in email_dot_indeces:\n email_dot_indeces.append(index-1)\n \n if (index + 1) < last_index and (index + 1) not in email_dot_indeces:\n email_dot_indeces.append(index+1)\n # Limit the number of items to be displayed to the user \n if limit < last_index - len(email_dot_indeces):\n stop = limit\n else:\n stop = (last_index - len(email_dot_indeces)) + 1\n # Randomly get an integer to serve as index to insert a dot \n for i in range(1,stop):\n r = random.randint(1, last_index)\n # Make sure index is not already used\n if r not in used_indeces and r not in email_dot_indeces:\n used_indeces.append(r)\n else:\n while r in used_indeces or r in email_dot_indeces:\n r = random.randint(1, last_index)\n \n used_indeces.append(r)\n \n count = 0\n # Go through all the indeces to be used\n for i in used_indeces:\n # Add only 1 dot to email\n email_var = email_prefix[:i] + '.' + email_prefix[i:]\n self.emails += email_var + email_suffix + '\\n'\n\n # Add 2 dots for variety\n if i == used_indeces[-1]:\n smaller = i if i < used_indeces[0] else used_indeces[0]\n larger = i if i > used_indeces[0] else used_indeces[0]\n \n email_var = email_prefix[:smaller] + '.' + email_prefix[smaller:larger] + '.' + email_prefix[larger:]\n else:\n smaller = i if i < used_indeces[count+1] else used_indeces[count+1]\n larger = i if i > used_indeces[count+1] else used_indeces[count+1]\n \n email_var = email_prefix[:smaller] + '.' + email_prefix[smaller:larger] + '.' + email_prefix[larger:]\n \n self.emails += email_var + email_suffix + '\\n'\n count += 1\n \n embed = Embed(title=\"\", color=0xff2600)\n embed.add_field(name='Jigged Gmail', value=self.emails, inline=True)\n\n await client.send_message(ctx.message.author, embed=embed)\n\n\nclass AddressJig(object):\n addresses = ''\n \n ''' Generates the 4 character code to be added to beginning of address. 
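The code is four random uppercase ASCII letters, which keeps each jigged address line unique. 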
\n \n @return: 4 Character code to be added to beginning of address '''\n def generate_code(self):\n code = ''\n for i in range(0,4):\n code += random.choice(string.ascii_uppercase)\n \n return code\n \n ''' Generates the 2nd part of the address\n \n @param address: The address passed by the user\n @param ctx: Discord information ''' \n async def generate_address_two(self, address, ctx):\n if address.replace(' ', '') == '':\n await client.send_message(ctx.message.author,\"Please enter a valid address.\")\n else:\n address_options = ['Apt', 'Apartment', 'Building', 'Bldg', 'Suite', 'Room', 'Condo', 'Unit']\n \n for i in range(1,16):\n index = random.randint(0, len(address_options)-1)\n address_2 = address_options[index]\n \n code = self.generate_code()\n num = 0\n if address_2 == 'Unit':\n num = random.randint(5, 100)\n else:\n num = random.randint(15, 500)\n \n self.addresses += code + ' ' + address + ' ' + address_2 + ' ' + str(num) + '\\n'\n \n embed = Embed(title=\"\", color=0xff9300)\n embed.add_field(name='Jigged Addresses', value=self.addresses, inline=True)\n \n await client.send_message(ctx.message.author, embed=embed)\n \n \nclass Shopify(object):\n # Shopify ATC related variables\n sizes = ''\n atc_links = ''\n \n ''' Checks whether a given url is a Shopify website or not.\n \n @param ctx: Discord information \n @param url: The URL to check for Shopify status ''' \n async def check_if_shopify(self, ctx, url):\n channel = ctx.message.channel\n \n # Ensure url starts with https:// in case url only contains www....\n url_formatting = re.match('https://', url)\n if url_formatting == None:\n url = 'https://' + url\n try:\n raw_HTML = requests.get(url, headers=headers, timeout=10)\n if raw_HTML.status_code != 200:\n await client.send_message(channel, \"An error has occurred completing your request.\")\n else:\n # Search for a specific script that exists in all shopify websites\n page = bs4.BeautifulSoup(raw_HTML.text, 'lxml')\n script = page.find_all('script')\n for i in script:\n # If script is found, we know it's a Shopify website\n if 'shopify' in str(i).lower():\n await client.send_message(channel, \"It IS a Shopify website!\")\n return\n await client.send_message(channel, 'It IS NOT a Shopify website!')\n except requests.Timeout as error:\n logger.error('Timeout Error: %s', str(error))\n await client.send_message(channel, \"There was a timeout error\")\n except requests.ConnectionError as error:\n logger.error('Connection Error: %s', str(error))\n await client.send_message(channel, \"A connection error has occurred.\")\n except requests.RequestException as error:\n logger.error('Request Error: %s', str(error))\n await client.send_message(channel, \"An error occurred making the internet request.\")\n \n \n ''' Retrieves sizes for item in stock.\n \n @param url: The url passed by the user pointing to the item he/she wants\n @param ctx: Discord information ''' \n async def get_sizes(self, url, ctx):\n # Ensure url starts with https:// in case url only contains www....\n url_formatting = re.match('https://', url)\n if url_formatting == None:\n url = 'https://' + url\n try:\n raw_HTML = requests.get(url, headers=headers, timeout=10)\n if raw_HTML.status_code != 200:\n await client.send_message(ctx.message.channel,\"An error has occurred completing your request\")\n return \n else:\n page = bs4.BeautifulSoup(raw_HTML.text, 'lxml')\n# print(page.title.string)\n await self.get_size_variant(url, page, ctx)\n return\n except requests.Timeout as error:\n logger.error('Timeout Error: %s', 
str(error))\n await client.send_message(ctx.message.channel,\"There was a timeout error\")\n except requests.ConnectionError as error:\n logger.error('Connection Error: %s', str(error))\n await client.send_message(ctx.message.channel,\"A connection error has occurred.\")\n except requests.RequestException as error:\n logger.error('Request Error: %s', str(error))\n await client.send_message(ctx.message.channel,\"An error occurred making the internet request.\")\n \n ''' Retrieves only the absolute URL from passed in URL.\n \n @param url: The address passed in by the user\n @return: absolute url retrieved from given url '''\n def get_absolute_url(self, url):\n absolute_url = re.match('https://', url)\n if absolute_url == None:\n absolute_url = re.match('[a-zA-Z0-9.-]+/', url)\n if absolute_url == None:\n return False\n absolute_url = absolute_url.group()\n return absolute_url\n else:\n absolute_url = re.match('https://[a-zA-Z0-9.-]+/', url)\n absolute_url = absolute_url.group()\n return absolute_url\n \n ''' Retrieves the thumbnail image for the item requested.\n \n @param page: HTML containing information to be scraped for image URL\n @return: url for thumbnail image to be displayed on Discord or None if not found'''\n def get_thumbnail_image(self, page):\n correct_image = None\n img = page.find_all('img')\n for i in img:\n if 'products' in str(i):\n correct_image = str(i)\n break\n \n if correct_image == None:\n return None\n else:\n item_image_url = re.search('cdn\\.shopify.+\\\"', correct_image)\n if item_image_url == None:\n return None\n else:\n item_image_url = item_image_url.group()\n item_image_url = item_image_url.split(' ')\n url = \"https://\"\n url += item_image_url[0].replace('\"','')\n return url\n \n ''' Retrieves the id associated to the item size (required to create a link). \n \n @param url: The item's url \n @param page: Page information retrieved through requests\n @param ctx: Discord information '''\n async def get_size_variant(self, url, page, ctx):\n scripts = page.find_all(\"script\")\n if scripts == None:\n await client.send_message(ctx.message.channel,\"An error has occurred completing your request. 
Check that the website is a shopify website.\")\n return\n \n script_index = self.find_variant_script(scripts)\n \n script = scripts[script_index].getText()\n \n ''' split it in this manner to store items of script separated by a new line '''\n script = script.split(';')\n ''' retrieve only the line containing size information '''\n script = script[3]\n ''' split in this manner so that each size is a different list item '''\n script = script.split('{\\\"id\\\":')\n ''' remove unwanted information in beginning of list '''\n script.remove(script[0])\n script.remove(script[0])\n \n status = True \n for item in script:\n if 'public_title\\\":\\\"' in item:\n data = item\n data = data.split(',')\n \n size = data[3].split(\"\\\"\")\n size = size[3]\n# print(size)\n retrieved_id = data[0]\n \n # add leading and trailing spaces to make regex matching easier\n size = \" \" + size + \" \"\n item_size = re.search('\\s\\d+\\.\\d+\\s', str(size))\n if item_size == None:\n item_size = re.search('\\s\\d{1,2}\\s', str(size))\n if item_size == None:\n item_size = re.search('(?i)(XS|X-S|(\\sS\\s|Small)|(\\sM\\s|Medium)|(\\sL\\s|Large)' + \n '|XL|XXL|XXXL|X-L|XX-L|XXX-L)', str(size))\n if item_size == None:\n item_size = size\n \n if item_size != size:\n item_size = item_size.group()\n \n item_size = item_size.replace('\\\\', '')\n item_size = item_size.replace('/', \"\")\n status = await self.print_link(url, item_size, str(retrieved_id), ctx)\n if status == False:\n break\n \n thumbnail_url = self.get_thumbnail_image(page)\n embed = Embed(title=page.title.string, url=url, description=self.atc_links, color=0x00f900)\n embed.set_footer(text=str(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))\n if thumbnail_url == None:\n pass\n else:\n embed.set_thumbnail(url=thumbnail_url)\n\n await client.send_message(ctx.message.channel, embed=embed)\n \n \n ''' Prints a correctly formatted link which takes user straight to purchase.\n \n @param url: URL of the item to be bought\n @param size: Size of the given item\n @param retrieved_id: Id associated to the item size\n @param ctx: Discord information \n @return: Whether or not generating links was successful '''\n async def print_link(self, url, size, retrieved_id, ctx):\n absolute_url = self.get_absolute_url(url)\n if absolute_url == False:\n await client.send_message(ctx.message.channel, \"An error has occurred completing your request\")\n return False\n \n self.sizes += size + \"\\n\"\n link = await tiny(absolute_url + 'cart/' + retrieved_id + ':1', ctx)\n if link == None:\n return False\n else:\n self.atc_links += '[[ ATC ]](' + link + ') - ' + size + '\\n'\n \n return True\n \n ''' Kickstarts the entire script.\n \n @param url: The url pointing to the item which the user wants to buy\n @param ctx: Discord information '''\n async def run(self, url, ctx):\n await self.get_sizes(url, ctx)\n \n ''' Find the correct script to retrieve information from.\n \n @param scripts: All the scripts retrieved for the page '''\n def find_variant_script(self, scripts): \n for number, script in enumerate(scripts):\n if \"variants\\\":[{\" in script.getText():\n return number\n \nclass PayPal(object):\n def __init__(self):\n self.expiration_date = None\n self.access_token = None\n self.page_count = None\n \n ''' Calculates dates to see if it has been more than a month since user's last payment.\n If it has, it means user's subscription has expired \n \n @return: Both current date, and date information for around 1 month before current time '''\n def calculateDates(self): 
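# Returns a (START, END) pair spanning the last 30 days in UTC, formatted for the PayPal transactions API.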
\n END = datetime.datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S%z')\n start = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + datetime.timedelta(-30)\n START = start.strftime('%Y-%m-%dT%H:%M:%S%z')\n return (START, END)\n \n ''' Generates access token for PayPal calls if required and stores it along with the expiration date '''\n def generate_token(self):\n token_url = 'https://api.paypal.com/v1/oauth2/token'\n data = {'grant_type': 'client_credentials'}\n access_token_resp = requests.post(token_url, data, verify=True, auth=(PAYPAL_CLIENT_ID, PAYPAL_CLIENT_SECRET))\n\n print(\"Access Token: \")\n print(access_token_resp.headers)\n print(\"Body: \" + access_token_resp.text)\n \n json_data = json.loads(access_token_resp.text)\n ''' Access token for PayPal '''\n self.access_token = json_data['access_token']\n ''' Number of seconds until token expires '''\n expires_in = json_data['expires_in']\n exp_date = datetime.datetime.utcnow().replace(tzinfo=pytz.utc) + datetime.timedelta(seconds=expires_in)\n self.expiration_date = exp_date.strftime('%Y-%m-%dT%H:%M:%S%z')\n print(self.expiration_date)\n \n ''' Checks to see if user has purchased a Premium membership \n \n @param ctx: Discord information\n @param email: Email associated to Discord user to check membership for\n @param param: Index of page currently being looked at for information (pagination)\n @param param: Indicates whether or not it's the observer method that made the call -> SEE paypal_observer()'''\n async def check_membership(self, ctx, email, page=None, observer=False):\n author = ''\n discord_server = client.get_server(\"355178719809372173\")\n \n if observer == True:\n pass\n else: \n author = ctx.message.author\n \n \n access_token = self.access_token\n now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc).strftime('%Y-%m-%dT%H:%M:%S%z')\n \n # Regenerate access token if necessary\n if self.expiration_date == None:\n self.generate_token()\n access_token = self.access_token\n if self.expiration_date < now:\n self.generate_token()\n access_token = self.access_token\n \n # Calculate dates to check for expiration\n dates = self.calculateDates()\n \n params = {}\n count = None\n \n if page == None:\n count = 1\n params = {\n 'start_date':dates[0],\n 'end_date':dates[1],\n 'fields':'all',\n 'page_size':100,\n 'page':1\n }\n else:\n count = page\n params = {\n 'start_date':dates[0],\n 'end_date':dates[1],\n 'fields':'all',\n 'page_size':100,\n 'page':page\n }\n \n transactions_base_url = 'https://api.paypal.com/v1/reporting/transactions'\n api_call_headers = {'Authorization': 'Bearer ' + access_token }\n transactions = requests.get(transactions_base_url, headers=api_call_headers, verify=True, params=params)\n \n if transactions.status_code != 200:\n if observer:\n await client.send_message(discord_server.get_member(\"460997994121134082\"), \"An error has occurred with the PayPal observer\")\n else:\n await client.send_message(author, \"An error has occurred completing your request\")\n return\n else:\n transactions_json = json.loads(transactions.text)\n# print(transactions_json)\n self.page_count = transactions_json['total_pages']\n# print(self.page_count)\n for transaction in transactions_json['transaction_details']:\n if 'email_address' in transaction['payer_info']:\n trans_email = transaction['payer_info']['email_address']\n trans_status = transaction['transaction_info']['transaction_status']\n \n if trans_email == email:\n if trans_status == 'S':\n if 
transaction['transaction_info']['transaction_amount']['value'] == '20.00':\n if observer:\n return True\n else:\n await sub_and_assign_roles(email, author, False, True, False)\n return\n if transaction['transaction_info']['transaction_amount']['value'] == '15.00':\n if observer:\n return True\n else:\n await sub_and_assign_roles(email, author, False, False, True)\n return \n else:\n if observer:\n return False\n else:\n await client.send_message(author, \"There was a problem with your transaction or it hasn't been fully processed by PayPal.\" \n + \" Please allow PayPal some time to process your payment and try again. If the problem persists, contact one of the admins.\")\n return\n \n if count < self.page_count:\n return await self.check_membership(ctx, email, count+1, observer)\n else:\n if observer:\n return False\n else:\n await client.send_message(author, \"This email is invalid. Make sure you use the email for the account you used to make your PayPal payment.\")\n return \n ''' Observer to check for expired memberships on PayPal \n \n @param author: User responsible for calling method to check for expired membership ''' \n async def paypal_observer(self, author):\n discord_server = client.get_server(\"355178719809372173\")\n \n await client.send_message(author, ':hourglass: Checking PayPal. Please wait...')\n \n expired_subs = \"Subscriptions for the following emails have either expired or there have been problems with their transaction. Please check PayPal to confirm before handling their subscription.\\n\"\n expired_list = \"\"\n emails = subscriptions.distinct(\"email\")\n for email in emails:\n data = subscriptions.find_one({\"email\": f\"{email}\"})\n if data != None:\n member = data[\"member\"]\n monitors = data[\"monitors\"]\n if str(member) == \"True\" or str(monitors) == \"True\":\n status = await self.check_membership(None, email, None, True)\n if status == False:\n expired_list += f\"\\t- {email}\\n\"\n else:\n continue\n \n if expired_list == \"\":\n await client.send_message(author, \"No subscriptions have expired\")\n else:\n expired_subs += expired_list\n await client.send_message(author, expired_subs)\n \n \n \nif __name__ == \"__main__\": \n ''' Initialize Discord bot by making the first call to it '''\n try:\n db_client = pymongo.MongoClient(MONGODB_URI)\n db = db_client.get_default_database()\n subscriptions = db['subscriptions']\n subscriptions.create_index('email')\n paypal = PayPal()\n\n client.run(TOKEN)\n except (HTTPException, LoginFailure):\n client.loop.run_until_complete(client.logout())\n", "sub_path": "master_bot.py", "file_name": "master_bot.py", "file_ext": "py", "file_size_in_byte": 60552, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "os.environ", "line_number": 47, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 49, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 50, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 52, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 55, "usage_type": "attribute"}, {"api_name": "discord.ext.commands.Bot", "line_number": 58, "usage_type": "call"}, {"api_name": "logging.getLogger", "line_number": 69, "usage_type": "call"}, {"api_name": "logging.ERROR", "line_number": 70, "usage_type": "attribute"}, {"api_name": "logging.FileHandler", "line_number": 71, "usage_type": "call"}, {"api_name": "logging.Formatter", "line_number": 72, "usage_type": "call"}, 
{"api_name": "requests.get", "line_number": 90, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 96, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 123, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 127, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 131, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 168, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 183, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 198, "usage_type": "call"}, {"api_name": "re.search", "line_number": 249, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 249, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 250, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 250, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 252, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 252, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 254, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 254, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 256, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 256, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 257, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 257, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 259, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 259, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 260, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 260, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 262, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 262, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 263, "usage_type": "call"}, {"api_name": "re.IGNORECASE", "line_number": 263, "usage_type": "attribute"}, {"api_name": "discord.PrivateChannel", "line_number": 356, "usage_type": "attribute"}, {"api_name": "discord.PrivateChannel", "line_number": 428, "usage_type": "attribute"}, {"api_name": "discord.utils.get", "line_number": 448, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 449, "usage_type": "call"}, {"api_name": "discord.utils.get", "line_number": 450, "usage_type": "call"}, {"api_name": "discord.PrivateChannel", "line_number": 474, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 477, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 498, "usage_type": "call"}, {"api_name": "re.search", "line_number": 505, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 507, "usage_type": "call"}, {"api_name": "re.search", "line_number": 513, "usage_type": "call"}, {"api_name": "requests.Timeout", "line_number": 523, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 526, "usage_type": "attribute"}, {"api_name": "requests.RequestException", "line_number": 529, "usage_type": "attribute"}, {"api_name": "discord.PrivateChannel", "line_number": 545, "usage_type": "attribute"}, {"api_name": "requests.Timeout", "line_number": 549, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 552, "usage_type": "attribute"}, {"api_name": "requests.RequestException", "line_number": 555, "usage_type": "attribute"}, {"api_name": 
"discord.embeds.Embed", "line_number": 570, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 590, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 598, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 607, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 616, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 624, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 635, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 645, "usage_type": "call"}, {"api_name": "re.match", "line_number": 667, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 670, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 696, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 697, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 698, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 701, "usage_type": "call"}, {"api_name": "requests.Session", "line_number": 771, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 782, "usage_type": "call"}, {"api_name": "re.search", "line_number": 821, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 868, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 874, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 900, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 915, "usage_type": "call"}, {"api_name": "string.ascii_uppercase", "line_number": 915, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 930, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 936, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 938, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 942, "usage_type": "call"}, {"api_name": "re.match", "line_number": 961, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 965, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 970, "usage_type": "call"}, {"api_name": "requests.Timeout", "line_number": 978, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 981, "usage_type": "attribute"}, {"api_name": "requests.RequestException", "line_number": 984, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 995, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 999, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 1004, "usage_type": "call"}, {"api_name": "requests.Timeout", "line_number": 1008, "usage_type": "attribute"}, {"api_name": "requests.ConnectionError", "line_number": 1011, "usage_type": "attribute"}, {"api_name": "requests.RequestException", "line_number": 1014, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 1023, "usage_type": "call"}, {"api_name": "re.match", "line_number": 1025, "usage_type": "call"}, {"api_name": "re.match", "line_number": 1031, "usage_type": "call"}, {"api_name": "re.search", "line_number": 1050, "usage_type": "call"}, {"api_name": "re.search", "line_number": 1098, "usage_type": "call"}, {"api_name": "re.search", "line_number": 1100, "usage_type": "call"}, {"api_name": "re.search", "line_number": 1102, "usage_type": "call"}, {"api_name": "discord.embeds.Embed", "line_number": 1117, "usage_type": "call"}, {"api_name": "datetime.datetime.now", 
"line_number": 1118, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1118, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1177, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 1177, "usage_type": "attribute"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1178, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1178, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 1178, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 1178, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 1186, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1192, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1197, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1197, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 1197, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 1197, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 1218, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1218, "usage_type": "attribute"}, {"api_name": "pytz.utc", "line_number": 1218, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 1255, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 1264, "usage_type": "call"}, {"api_name": "pymongo.MongoClient", "line_number": 1337, "usage_type": "call"}, {"api_name": "discord.errors.HTTPException", "line_number": 1344, "usage_type": "name"}, {"api_name": "discord.errors.LoginFailure", "line_number": 1344, "usage_type": "name"}]} +{"seq_id": "399535301", "text": "# Feature Analysis of employee turnover using Machine Learning Approach\n# Importing the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom keras.utils import np_utils\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import (brier_score_loss, precision_score, recall_score,f1_score)\nfrom sklearn.calibration import CalibratedClassifierCV, calibration_curve\nfrom sklearn.model_selection import train_test_split\nfrom SBSAlgorithm1 import SBS\n# Importing the dataset\ndataset = pd.read_csv('HR_comma_sep(SalaryManipulated).csv')\n#preprocessing the data \ndataset['type'] = dataset['type'].factorize()[0] \ndataset['satisfaction_level']=dataset['satisfaction_level']*100\ndataset['last_evaluation']=dataset['last_evaluation']*100\nnewDataset = dataset[['satisfaction_level', 'last_evaluation', 'number_project','average_montly_hours','time_spend_company','Work_accident','promotion_last_5years','type','salary']].copy()\ndef SBSShow():\n sbs = SBS(classifier, k_features=1)\n sbs.fit(X_train, y_train)\n k_feat = [len(k) for k in sbs.subsets_]\n plt.plot(k_feat, sbs.scores_, marker = 'o')\n plt.ylim([0.7,1.1])\n plt.ylabel('accuracy')\n plt.xlabel('Number of features')\n plt.grid()\n plt.show()\n #print(sbs.k_feature_idx_)\n k=list(sbs.subsets_[4])\n print(newDataset.columns[0:][k])\n print('Indices: ', sbs.subsets_)\n print('Scores: ', sbs.scores_)\n #print(newDataset.columns[1:][k])\n# Importing the 
dataset\n#dataset = pd.read_csv('HR_comma_sep(SalaryManipulated).csv')\n#preprocessing the data \n#dataset['salary'] = dataset['salary'].factorize()[0] \n#dataset['satisfaction_level']=dataset['satisfaction_level']*100\n#dataset['last_evaluation']=dataset['last_evaluation']*100\n#newDataset = dataset[['satisfaction_level', 'last_evaluation', 'number_project','average_montly_hours','time_spend_company','salary']].copy()\n#dataset['salary'] = dataset['salary'].factorize()[0] \n#X = dataset.iloc[:, [0, 1,2,3,4,5,7,8, 9]].values\nX = dataset.iloc[:, [0,1,3,4,9]].values\ny = dataset.iloc[:, 6].values\n\n''' \nfrom sklearn.preprocessing import LabelEncoder,OneHotEncoder\nlabelEncoder_x1 = LabelEncoder()\nX[:,1]=labelEncoder_x1.fit_transform(X[:,1])\nonehotencoder = OneHotEncoder(categorical_features = [1])\n\nlabelEncoder_x2 = LabelEncoder()\nX[:,8]=labelEncoder_x2.fit_transform(X[:,8])\nonehotencoder = OneHotEncoder(categorical_features = [8])\n\nX = onehotencoder.fit_transform(X).toarray()\n'''\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 1)\n'''\nlabelEncoder_y = LabelEncoder()\nlabelEncoder_y.fit(X[1])\nencoded_Y = labelEncoder_y.transform(X[1])\nX[1]=np_utils.to_categorical(encoded_Y)\n\n'''\n'''\n# Feature Scaling\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nX_train = sc.fit_transform(X_train)\nX_test = sc.transform(X_test)\n\nfrom sklearn.preprocessing import RobustScaler\nrb = RobustScaler()\nX_train=rb.fit_transform(X_train)\nX_test = rb.transform(X_test)\n'''\n\nfrom sklearn.preprocessing import MinMaxScaler\nMMS = MinMaxScaler()\nX_train= MMS.fit_transform(X_train)\nX_test = MMS.transform(X_test)\n#Select K best features\n\n'''\nX_train = SelectKBest(chi2, k=6).fit_transform(X_train, y_train)\nX_test = SelectKBest(chi2, k=6).fit_transform(X_test, y_test)\n'''\n#Get the scores of the best features\n# write k= 'all' to get the scores of all the features\nselector = SelectKBest(chi2, k=3).fit(X_train,y_train)\nX_train = selector.transform(X_train) # not needed to get the score\nX_test = selector.transform(X_test)\nscores = selector.scores_\n#Univariate Feature Selcection\n#from sklearn.feature_selection import SelectPercentile, f_classif\n#selector = SelectPercentile(f_classif, percentile=50)\n#selector.fit(X_train, y_train)\n#Now = X_train.shape\n#Applying PCA\n'''\nfrom sklearn.decomposition import PCA\npca = PCA(n_components=2)\nX_train = pca.fit_transform(X_train)\nX_test = pca.transform(X_test)\nexplained_variance = pca.explained_variance_ratio_\n'''\n# Fitting classification methods to the Training set\nfrom sklearn.tree import DecisionTreeClassifier\nclassifier = DecisionTreeClassifier(criterion = 'entropy', random_state = 1)\nclassifier.fit(X_train, y_train)\nSBSShow()\n\n\n# try printing some data from newDF\n\n#again optional \n\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier = RandomForestClassifier(n_estimators=1000,random_state=0)\nclassifier.fit(X_train,y_train)\nfeat_labels = newDataset.columns[0:]\n\n\nforest = RandomForestClassifier(criterion='entropy',n_estimators=1000, random_state=1)\nforest.fit(X_train,y_train)\nimportances = forest.feature_importances_\nindices = np.argsort(importances)[::-1]\nfor f in range(0,5):\n print(\"%2d) %-*s %f\" % (f+1, 30,\n feat_labels[indices[f]],\n importances[indices[f]]))\nprint(X_train.Shape)\nfrom sklearn import svm\nclassifier 
= svm.SVC(kernel='rbf',gamma=0.1)\nclassifier.fit(X_train,y_train)\n \nclassifier = MLPClassifier()\nclassifier.fit(X_train,y_train)\n\nclassifier = GaussianNB()\nclassifier.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = classifier.predict(X_test)\n# Get the accuracy \nprint ('RF accuracy: TRAINING', classifier.score(X_train,y_train))\nprint ('RF accuracy: TESTING', classifier.score(X_test,y_test))\n\n# Making the Confusion Matrix\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\ndef plot_calibration_curve(est, name, fig_index):\n \"\"\"Plot calibration curve for est w/o and with calibration. \"\"\"\n # Calibrated with isotonic calibration\n #isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')\n\n # Calibrated with sigmoid calibration\n #sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')\n\n # Logistic regression with no calibration as baseline\n # lr = LogisticRegression(C=1., solver='lbfgs')\n\n fig = plt.figure(fig_index, figsize=(10, 10))\n ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)\n ax2 = plt.subplot2grid((3, 1), (2, 0))\n\n ax1.plot([0, 1], [0, 1], \"k:\", label=\"Perfectly calibrated\")\n for clf, name in [(est, name)]:\n clf.fit(X_train, y_train)\n y_pred = clf.predict(X_test)\n if hasattr(clf, \"predict_proba\"):\n prob_pos = clf.predict_proba(X_test)[:, 1]\n else: # use decision function\n prob_pos = clf.decision_function(X_test)\n prob_pos = \\\n (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())\n\n clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())\n print(\"%s:\" % name)\n print(\"\\tBrier: %1.3f\" % (clf_score))\n print(\"\\tPrecision: %1.3f\" % precision_score(y_test, y_pred))\n print(\"\\tRecall: %1.3f\" % recall_score(y_test, y_pred))\n print(\"\\tF1: %1.3f\\n\" % f1_score(y_test, y_pred))\n\n fraction_of_positives, mean_predicted_value = \\\n calibration_curve(y_test, prob_pos, n_bins=10)\n\n ax1.plot(mean_predicted_value, fraction_of_positives, \"s-\",\n label=\"%s (%1.3f)\" % (name, clf_score))\n\n ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,\n histtype=\"step\", lw=2)\n\n ax1.set_ylabel(\"Fraction of positives\")\n ax1.set_ylim([-0.05, 1.05])\n ax1.legend(loc=\"lower right\")\n ax1.set_title('Calibration plots (reliability curve)')\n\n ax2.set_xlabel(\"Mean predicted value\")\n ax2.set_ylabel(\"Count\")\n ax2.legend(loc=\"upper center\", ncol=2)\n\n plt.tight_layout()\n\n# Plot calibration curve for Gaussian Naive Bayes\n#plot_calibration_curve(DecisionTreeClassifier(criterion = 'entropy', random_state = 1),\"Decision Tree\",1)\n#plot_calibration_curve(RandomForestClassifier(criterion='entropy',n_estimators=1000,random_state=1),\"Random Forest\",2)\n#plot_calibration_curve(svm.SVC(kernel='rbf',gamma=0.1),\"SVM\",3)\nplot_calibration_curve(MLPClassifier(solver='lbfgs', alpha=1e-5,hidden_layer_sizes=(5, 2)),\"MLPClassifier\",4)\n#plot_calibration_curve(GaussianNB(),\"GaussianNB\",5)\n# Plot calibration curve for Linear SVC\n#plot_calibration_curve(LinearSVC(), \"SVC\", 2)\nplt.show()\n\n", "sub_path": "OtherFiles/AlgoCompare.py", "file_name": "AlgoCompare.py", "file_ext": "py", "file_size_in_byte": 8421, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "pandas.read_csv", "line_number": 18, "usage_type": "call"}, {"api_name": "SBSAlgorithm1.SBS", "line_number": 25, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 28, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 66, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MinMaxScaler", "line_number": 88, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.SelectKBest", "line_number": 99, "usage_type": "call"}, {"api_name": "sklearn.feature_selection.chi2", "line_number": 99, "usage_type": "argument"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 118, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 128, "usage_type": "call"}, {"api_name": "sklearn.ensemble.RandomForestClassifier", "line_number": 133, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 136, "usage_type": "call"}, {"api_name": "sklearn.svm.SVC", "line_number": 143, "usage_type": "call"}, {"api_name": "sklearn.svm", "line_number": 143, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 146, "usage_type": "call"}, {"api_name": "sklearn.naive_bayes.GaussianNB", "line_number": 149, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 173, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 173, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 174, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 174, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot2grid", "line_number": 175, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 175, "usage_type": "name"}, {"api_name": "sklearn.metrics.brier_score_loss", "line_number": 188, "usage_type": "call"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 191, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 192, "usage_type": "call"}, {"api_name": "sklearn.metrics.f1_score", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.calibration.calibration_curve", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "sklearn.neural_network.MLPClassifier", "line_number": 219, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 223, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 223, "usage_type": "name"}]} +{"seq_id": "200345062", "text": "import numpy as np\nimport matplotlib.pyplot as plt\nfrom FuncionesActivacion import FuncionesActivacion\n\nclass 
RedNeuronalMulticapa(FuncionesActivacion):\n def __init__(self, entradas, salidas, factor_aprendizaje, epocas, activacion='tangente', capas=[2,3,2]):\n #ob = FuncionesActivacion()\n self.nombreactivacion = activacion\n self.activacion = lambda : 0\n self.activacion_prima = lambda : 0\n self.entradas = np.array(entradas)\n self.salidas = np.array(salidas)\n self.factor_aprendizaje = factor_aprendizaje\n self.epocas = epocas\n\n #Iniciarlizar pesos\n self.pesos = []\n self.deltas = []\n # capas = [2,3,2] randon entre 1, -1\n for i in range(1, len(capas) -1):\n r = 2 * np.random.random((capas[i-1] + 1, capas[i] + 1)) -1\n self.pesos.append(r)\n \n #asignar aleatorios a la capa de salida\n r = 2 * np.random.random((capas[i] + 1, capas[i + 1])) - 1\n self.pesos.append(r)\n \n def entrenar(self):\n # print(self.entradas)\n # print(self.salidas)\n self.validaractivacion()\n ones = np.atleast_2d(np.ones(self.entradas.shape[0]))\n self.entradas = np.concatenate((ones.T, self.entradas), axis = 1)\n \n for k in range(self.epocas):\n i = np.random.randint(self.entradas.shape[0])\n a = [self.entradas[i]]\n \n for l in range(len(self.pesos)):\n dot_value = np.dot(a[l], self.pesos[l])\n activacion = self.activacion(dot_value)\n a.append(activacion)\n \n #Calculo la diferencia entre la capa de salida y el valor obtenido\n error = self.salidas[i] - a[-1]\n deltas = [error * self.activacion_prima(a[-1])]\n \n #Empezamos en la segunda capa hasta la ultima\n for l in range(len(a) - 2, 0, -1):\n deltas.append(deltas[-1].dot(self.pesos[l].T) * self.activacion_prima(a[l]))\n self.deltas.append(deltas)\n \n #invertir\n deltas.reverse()\n \n #Backpropagation\n for i in range(len(self.pesos)):\n capa = np.atleast_2d(a[i])\n delta = np.atleast_2d(deltas[i])\n self.pesos[i] += self.factor_aprendizaje * capa.T.dot(delta)\n \n if k % 10000 == 0: print('epocas:', k)\n \n def predecir(self, x):\n unos = np.atleast_2d(np.ones(x.shape[0]))\n a = np.concatenate((np.ones(1).T, np.array(x)), axis = 0)\n for l in range(0, len(self.pesos)):\n a = self.activacion(np.dot(a, self.pesos[l]))\n return a\n\n def obtener_deltas(self):\n return self.deltas\n \n def imprimirResultado(self):\n cadena = \"\"\n print(\"Listado de Pesos de Conexiones\")\n for i in range(len(self.pesos)):\n cadena += str(self.pesos[i]) + \"\\n\"\n return cadena\n\n def generarGrafico(self):\n deltas = self.obtener_deltas()\n valores = []\n index = 0\n for arreglo in deltas:\n valores.append( arreglo[1][0] + arreglo[1][1] )\n index = index + 1\n\n plt.plot( range( len( valores ) ), valores, color='b' )\n plt.ylim( [0, 1] )\n plt.ylabel( 'Costo' )\n plt.xlabel( 'Epocas' )\n plt.tight_layout()\n plt.show()\n\n def validaractivacion(self):\n if self.nombreactivacion == 'sigmoide':\n self.activacion = self.sigmoide\n self.activacion_prima = self.sigmoide_derivado\n elif self.nombreactivacion == 'tangente':\n self.activacion = self.tangente\n self.activacion_prima = self.tangente_derivada", "sub_path": "Python/IA/RedNeuronalMulticapa.py", "file_name": "RedNeuronalMulticapa.py", "file_ext": "py", "file_size_in_byte": 3774, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "FuncionesActivacion.FuncionesActivacion", "line_number": 5, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 12, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 21, "usage_type": "call"}, {"api_name": 
"numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "numpy.random.random", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.atleast_2d", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 33, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 36, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 36, "usage_type": "attribute"}, {"api_name": "numpy.dot", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 58, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 59, "usage_type": "call"}, {"api_name": "numpy.atleast_2d", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 65, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 66, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 89, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 89, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylim", "line_number": 90, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 90, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 91, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 92, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 92, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 93, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 93, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}]} +{"seq_id": "515189760", "text": "from flask import Blueprint, request, render_template, redirect, url_for, flash, g\nfrom app import db, models\nfrom forms import CadastroForm\nfrom flask.ext.login import login_user, current_user, login_required\nfrom datetime import date\n\ncadastro_blueprint = Blueprint('cadastro', __name__)\n\n@cadastro_blueprint.route('/cadastro/', methods=['GET', 'POST'])\ndef cadastro():\n\tif request.method == 'POST':\n\t\tusername_cad = request.form['username']\n\t\tsenha_cad = request.form['senha']\n\t\temail_cad = request.form['email']\n\t\tnome_cad = request.form['nome']\n\t\tano_cad = request.form['nascimento']\n\t\tidade_cad = int(date.today().year) - int(ano_cad)\n\t\tmesmo_un = models.Usuario.query.filter_by(username=username_cad).first()\n\t\tmesmo_email = models.Usuario.query.filter_by(email=email_cad).first()\n\t\t#se nao tiver ngm com email ou username igual, adiciona o novo usuario\n\t\tif mesmo_email is None and mesmo_un is None:\n\t\t\tusuario_cadastro = models.Usuario(nome=nome_cad, username=username_cad, senha=senha_cad, email=email_cad, nascimento=ano_cad, idade=idade_cad)\n\t\t\tdb.session.add(usuario_cadastro)\n\t\t\tdb.session.commit()\n\t\t\tlogin_user(usuario_cadastro)\n\t\t\treturn redirect(url_for('users.perfil', username=usuario_cadastro.username))\n\t\tflash('Email ou usarname ja 
usados')\n\tform = CadastroForm()\n\treturn render_template('cadastro.html', form=form)", "sub_path": "blueprints_treinamento/blueprints/cadastro.py", "file_name": "cadastro.py", "file_ext": "py", "file_size_in_byte": 1338, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "flask.Blueprint", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 11, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 11, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 12, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 12, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 13, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 13, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 14, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 14, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 15, "usage_type": "name"}, {"api_name": "flask.request.form", "line_number": 16, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 16, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 17, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 17, "usage_type": "name"}, {"api_name": "app.models.Usuario.query.filter_by", "line_number": 18, "usage_type": "call"}, {"api_name": "app.models.Usuario", "line_number": 18, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 18, "usage_type": "name"}, {"api_name": "app.models.Usuario.query.filter_by", "line_number": 19, "usage_type": "call"}, {"api_name": "app.models.Usuario", "line_number": 19, "usage_type": "attribute"}, {"api_name": "app.models", "line_number": 19, "usage_type": "name"}, {"api_name": "app.models.Usuario", "line_number": 22, "usage_type": "call"}, {"api_name": "app.models", "line_number": 22, "usage_type": "name"}, {"api_name": "app.db.session.add", "line_number": 23, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 23, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 23, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 24, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 24, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 24, "usage_type": "name"}, {"api_name": "flask.ext.login.login_user", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 26, "usage_type": "call"}, {"api_name": "flask.flash", "line_number": 27, "usage_type": "call"}, {"api_name": "forms.CadastroForm", "line_number": 28, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}]} +{"seq_id": "515722756", "text": "# imports --------------------\n\nimport numpy as np\nimport json\nfrom collections import OrderedDict\nimport time\nimport torch\nfrom torch import nn, optim\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms, models\nfrom torch.autograd import Variable\nimport argparse\n\n# command line arguments --------------------\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"data_directory\",\n action = \"store\",\n type = 
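The cadastro.py record above derives idade_cad as a plain difference of calendar years, which overstates the age of anyone whose birthday hasn't occurred yet this year; with only a birth year in the form, that is the best available. If a full birth date were captured instead, a date-aware computation is short (a sketch, assuming an ISO 'YYYY-MM-DD' input string):

from datetime import date

def idade(nascimento: str) -> int:
    nasc = date.fromisoformat(nascimento)
    hoje = date.today()
    # Subtract one year when (month, day) hasn't been reached yet this year.
    return hoje.year - nasc.year - ((hoje.month, hoje.day) < (nasc.month, nasc.day))

print(idade('2000-12-31'))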
str)\n\nparser.add_argument(\"--category_names\",\n action = \"store\",\n default = \"cat_to_name.json\",\n dest = \"category_names\",\n type = str,\n help = \"JSON file with category names\")\n\nparser.add_argument(\"--arch\",\n action = \"store\",\n default = \"vgg13\",\n dest = \"arch\",\n type = str,\n help = \"Pretrained model; supported models: vgg11, vgg13, vgg17, vgg19, densenet121, densenet169, densent201, densenet161\")\n\nparser.add_argument(\"--gpu\",\n action = \"store_const\",\n const = \"cuda:0\",\n help = \"Use GPU for training\") \n \nparser.add_argument(\"--learning_rate\",\n action = \"store\",\n dest = \"learning_rate\",\n default = 0.001,\n type = float,\n help = \"Learning rate\") # with a suggested learning rate of 0.01, all my models perform beyond dreadful\n\nparser.add_argument(\"--hidden_units\",\n action = \"store\",\n dest = \"hidden_units\",\n default = 512,\n type = int,\n help = \"Units in the hidden layer\") \n\nparser.add_argument(\"--epochs\",\n action = \"store\",\n dest = \"epochs\",\n default = 20,\n type = int,\n help = \"Epochs\")\n\nparser.add_argument(\"--save_dir\",\n action = \"store\",\n default = \".\",\n dest = \"save_dir\",\n type = str,\n help = \"Folder for saved checkpoints\")\n\nargs = parser.parse_args()\n\nprint(\"--------------------\")\nprint(\"Selected arguments:\")\nprint(\"Directory of data:\", args.data_directory)\nprint(\"JSON file with category names:\", args.category_names)\nprint(\"Pretrained model architecture:\", args.arch)\nprint(args.gpu, \"; if = cuda:0, then training on GPU\")\nprint(\"Learning rate:\", args.learning_rate)\nprint(\"Units in the hidden layer:\", args.hidden_units)\nprint(\"Epochs:\", args.epochs)\nprint(\"Directory of saved checkpoint.pth:\", args.save_dir) \nprint(\"--------------------\")\n \n# data directories --------------------\n\ndata_dir = args.data_directory\ntrain_dir = data_dir + \"/train\"\nvalid_dir = data_dir + \"/valid\"\ntest_dir = data_dir + \"/test\"\n\n# data loaders --------------------\n\ntrain_transforms = transforms.Compose([transforms.RandomRotation(25),\n transforms.RandomResizedCrop(224),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\nvalid_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\ntest_transforms = transforms.Compose([transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])\n\ntrain_data = datasets.ImageFolder(data_dir + \"/train\", transform = train_transforms)\nvalid_data = datasets.ImageFolder(data_dir + \"/valid\", transform = valid_transforms)\ntest_data = datasets.ImageFolder(data_dir + \"/test\", transform = test_transforms)\n\nbatch_size = 32\n\ntrainloader = torch.utils.data.DataLoader(train_data, batch_size = batch_size, shuffle = True)\nvalidloader = torch.utils.data.DataLoader(valid_data, batch_size = batch_size, shuffle = True)\ntestloader = torch.utils.data.DataLoader(test_data, batch_size = batch_size)\n\ntrain_obs = 6552\nvalid_obs = 818\ntest_obs = 819\n\n# class names --------------------\n\nwith open(args.category_names, 'r') as f:\n cat_to_name = json.load(f)\n \n# define model and hidden units --------------------\n\nmodel = models.__dict__[args.arch](pretrained=True) # ARGUMENT\n\nfor param in model.parameters():\n param.requires_grad = 
False\n \ndensenet_inputs = {\"densenet121\": 1024, \"densenet169\": 1664, \"densenet201\": 1920, \"densenet161\": 2208}\n\nif args.arch in [\"vgg11\", \"vgg13\", \"vgg16\", \"vgg19\"]:\n input_units = model.classifier[0].in_features # ARGUMENT\nif args.arch in [\"densenet121\", \"densenet169\", \"densene201\", \"densenet161\"]:\n input_units = densenet_inputs[arch] # ARGUMENT\n \nhidden_units = args.hidden_units # ARGUMENT\noutput_units = len(cat_to_name)\n\nclassifier = nn.Sequential(OrderedDict([\n (\"fc1\", nn.Linear(input_units, hidden_units)), # ARGUMENT \n (\"relu\", nn.ReLU()),\n (\"dropout\", nn.Dropout(p = 0.2)),\n (\"fc2\", nn.Linear(hidden_units, output_units)), # ARGUMENT \n (\"output\", nn.LogSoftmax(dim = 1))\n ]))\n \nmodel.classifier = classifier \n \n# set model parameters --------------------\n\nbatches = len(trainloader.batch_sampler)\nprint(f\"Training model on {train_obs} images in {batches} batches of batch size {batch_size}\")\n\ncriterion = nn.NLLLoss()\noptimizer = optim.Adam(model.classifier.parameters(), lr = args.learning_rate) \n\n# define device --------------------\n\nxdevice = args.gpu\nif xdevice == \"cuda:0\":\n device = torch.device(xdevice)\nelse:\n device = torch.device(\"cpu\")\nmodel = model.to(device)\nprint(\"Training model on\", device)\n\n# start training --------------------\n\nstart_time = time.time()\nmodel.zero_grad()\n\nepochs = args.epochs\n\nfor e in range(epochs):\n\n print(f\"Starting training for epoch {e+1} of {epochs}\")\n \n total = 0\n correct = 0\n running_loss = 0\n\n for ii, (images, labels) in enumerate(trainloader):\n images, labels = images.to(device), labels.to(device)\n \n optimizer.zero_grad() \n \n # forward and backward passes\n outputs = model.forward(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n \n # running loss of epoch\n running_loss += loss.item()\n \n # accuracy of epoch\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels).sum().item()\n \n # print after each epoch\n print(f\"Epoch {e+1} of {epochs}\", \n \"--- Training loss:{:.4f}\".format(running_loss/batches), \n \"--- Training accuracy:{:.4f}\".format(correct/total))\n\n # evaluate model in validation set\n \n # reset metrics for epoch\n valid_correct = 0\n valid_total = 0\n valid_running_loss = 0\n\n # don't calculate gradients\n with torch.no_grad():\n for ii, (images, labels) in enumerate(validloader):\n images, labels = images.to(device), labels.to(device)\n\n outputs = model(images)\n loss = criterion(outputs, labels) #neww\n valid_running_loss += loss.item() #new\n \n # accuracy of epoch\n _, predicted = torch.max(outputs.data, 1)\n valid_total += labels.size(0)\n valid_correct += (predicted == labels).sum().item()\n \n # print after each epoch\n print(f\"Epoch {e+1} of {epochs}\", \n \"--- Validation loss:{:.4f}\".format(valid_running_loss/batches), \n \"--- Validation accuracy:{:.4f}\".format(valid_correct/valid_total))\n \nend_time = time.time()\nduration = (end_time - start_time)//60\nprint(\"Training complete\")\nprint(f\"Training time: {duration} minutes\")\n\n# Save model to checkpoint --------------------\n\nmodel.class_to_idx = train_data.class_to_idx\nmodel_state = {\n \"epoch\": epochs,\n \"state_dict\": model.state_dict(),\n \"optimizer_dict\": optimizer.state_dict(),\n \"classifier\": classifier,\n \"class_to_idx\": model.class_to_idx,\n \"arch\": args.arch \n}\n\nsave_location = f\"{args.save_dir}/checkpoint.pth\"\ntorch.save(model_state, 
save_location)\n\nprint(f\"Model saved to {save_location}/checkpoint.pth\")", "sub_path": "03_Deep_Learning/train.py", "file_name": "train.py", "file_ext": "py", "file_size_in_byte": 8734, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 16, "usage_type": "call"}, {"api_name": "torchvision.transforms.Compose", "line_number": 92, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 92, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomRotation", "line_number": 92, "usage_type": "call"}, {"api_name": "torchvision.transforms.RandomResizedCrop", "line_number": 93, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 93, "usage_type": "name"}, {"api_name": "torchvision.transforms.RandomHorizontalFlip", "line_number": 94, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 94, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 95, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 95, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 96, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 96, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 98, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 98, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 98, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 99, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 99, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 100, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 100, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 101, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 101, "usage_type": "name"}, {"api_name": "torchvision.transforms.Compose", "line_number": 103, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 103, "usage_type": "name"}, {"api_name": "torchvision.transforms.Resize", "line_number": 103, "usage_type": "call"}, {"api_name": "torchvision.transforms.CenterCrop", "line_number": 104, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 104, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 105, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 105, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 106, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 106, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 108, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 108, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 109, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 109, "usage_type": "name"}, {"api_name": "torchvision.datasets.ImageFolder", "line_number": 110, "usage_type": "call"}, {"api_name": "torchvision.datasets", "line_number": 110, "usage_type": "name"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 114, "usage_type": "call"}, {"api_name": "torch.utils", 
"line_number": 114, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 115, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 115, "usage_type": "attribute"}, {"api_name": "torch.utils.data.DataLoader", "line_number": 116, "usage_type": "call"}, {"api_name": "torch.utils", "line_number": 116, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 125, "usage_type": "call"}, {"api_name": "torchvision.models.__dict__", "line_number": 129, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 129, "usage_type": "name"}, {"api_name": "torch.nn.Sequential", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 144, "usage_type": "name"}, {"api_name": "collections.OrderedDict", "line_number": 144, "usage_type": "call"}, {"api_name": "torch.nn.Linear", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "name"}, {"api_name": "torch.nn.ReLU", "line_number": 146, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 146, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 147, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 147, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 148, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 148, "usage_type": "name"}, {"api_name": "torch.nn.LogSoftmax", "line_number": 149, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 149, "usage_type": "name"}, {"api_name": "torch.nn.NLLLoss", "line_number": 159, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 159, "usage_type": "name"}, {"api_name": "torch.optim.Adam", "line_number": 160, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 160, "usage_type": "name"}, {"api_name": "torch.device", "line_number": 166, "usage_type": "call"}, {"api_name": "torch.device", "line_number": 168, "usage_type": "call"}, {"api_name": "time.time", "line_number": 174, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 202, "usage_type": "call"}, {"api_name": "torch.no_grad", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.max", "line_number": 228, "usage_type": "call"}, {"api_name": "time.time", "line_number": 237, "usage_type": "call"}, {"api_name": "torch.save", "line_number": 255, "usage_type": "call"}]} +{"seq_id": "348351880", "text": "from datetime import datetime\n\nimport aiopg\nimport gino\nfrom aiohttp import web\nfrom asyncpg import UniqueViolationError\n\nDB_DSN = f\"postgres://username:password@127.0.0.1:5432/db_name\"\n\ndb = gino.Gino()\n\n\nclass BaseModel:\n\n @classmethod\n async def get_or_404(cls, id_):\n instance = await cls.get(id_)\n if not instance:\n raise web.HTTPNotFound()\n return instance\n\n @classmethod\n async def create_instance(cls, **kwargs):\n try:\n instance = await cls.create(**kwargs)\n return instance\n except UniqueViolationError:\n raise web.HTTPBadRequest()\n\n\nclass Announcement(db.Model, BaseModel):\n __tablename__ = 'announcements'\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(300), nullable=False)\n description = db.Column(db.String(1000), nullable=False)\n created = db.Column(db.DateTime, default=datetime.today)\n owner_fullname = db.Column(db.String, nullable=False)\n\n\nclass ServerStatus(web.View):\n\n async def get(self):\n return web.json_response({'status': 'ok'})\n\n\nasync def register_pg_pool(app):\n print('App start')\n async with 
aiopg.create_pool(DB_DSN) as pool:\n app['pg_pool'] = pool\n yield\n print('App finish')\n\n\nasync def register_orm(app):\n await db.set_bind(DB_DSN)\n yield\n await db.pop_bind().close()\n\n\nclass AnnouncementsView(web.View):\n async def get(self):\n pool = self.request.app['pg_pool']\n async with pool.acquire() as conn:\n async with conn.cursor() as cursor:\n await cursor.execute('SELECT * FROM announcements')\n db_response = await cursor.fetchall()\n return web.json_response(db_response)\n\n\nclass AnnouncementView(web.View):\n async def get(self):\n announcement_id = int(self.request.match_info['announcement_id'])\n announcement = await Announcement.get_or_404(announcement_id)\n return web.json_response(announcement.to_dict())\n\n async def post(self):\n announcement_data = await self.request.json()\n new_announcement = await Announcement.create_instance(**announcement_data)\n return web.json_response(new_announcement.to_dict())\n\n\napp = web.Application()\napp.add_routes([web.get('/status', ServerStatus)])\napp.add_routes([web.get('/announcements', AnnouncementsView)])\napp.add_routes([web.get('/announcement/{announcement_id:\\d+}', AnnouncementView)])\napp.add_routes([web.get('/announcement', AnnouncementView)])\napp.cleanup_ctx.append(register_pg_pool)\napp.cleanup_ctx.append(register_orm)\nweb.run_app(app, port=5001)\n", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 2631, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "gino.Gino", "line_number": 10, "usage_type": "call"}, {"api_name": "aiohttp.web.HTTPNotFound", "line_number": 19, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 19, "usage_type": "name"}, {"api_name": "asyncpg.UniqueViolationError", "line_number": 27, "usage_type": "name"}, {"api_name": "aiohttp.web.HTTPBadRequest", "line_number": 28, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.today", "line_number": 36, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 36, "usage_type": "name"}, {"api_name": "aiohttp.web.View", "line_number": 40, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 40, "usage_type": "name"}, {"api_name": "aiohttp.web.json_response", "line_number": 43, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 43, "usage_type": "name"}, {"api_name": "aiopg.create_pool", "line_number": 48, "usage_type": "call"}, {"api_name": "aiohttp.web.View", "line_number": 60, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 60, "usage_type": "name"}, {"api_name": "aiohttp.web.json_response", "line_number": 67, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 67, "usage_type": "name"}, {"api_name": "aiohttp.web.View", "line_number": 70, "usage_type": "attribute"}, {"api_name": "aiohttp.web", "line_number": 70, "usage_type": "name"}, {"api_name": "aiohttp.web.json_response", "line_number": 74, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 74, "usage_type": "name"}, {"api_name": "aiohttp.web.json_response", "line_number": 79, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 79, "usage_type": "name"}, {"api_name": "aiohttp.web.Application", "line_number": 82, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 82, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 83, "usage_type": "call"}, 
{"api_name": "aiohttp.web", "line_number": 83, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 84, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 84, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 85, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 85, "usage_type": "name"}, {"api_name": "aiohttp.web.get", "line_number": 86, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 86, "usage_type": "name"}, {"api_name": "aiohttp.web.run_app", "line_number": 89, "usage_type": "call"}, {"api_name": "aiohttp.web", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "345205796", "text": "import numpy as np\nimport cv2\n\nimg = cv2.imread('test.jpg')\nif img is None:\n print('No such image.')\nelse:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n # find Harrirs corners\n gray = np.float32(gray)\n dst = cv2.cornerHarris(gray, 2, 3, 0.04)\n\n dst = cv2.dilate(dst, None)\n # img[dst > 0.01*dst.max()] = [0, 0, 255]\n # cv2.imshow('dst', img)\n # if cv2.waitKey(0) == 27:\n # cv2.destroyAllWindows()\n\n _, dst = cv2.threshold(dst, 0.01 * dst.max(), 255, 0)\n dst = np.uint8(dst)\n # find centroids\n ret, labels, stats, centroids = cv2.connectedComponentsWithStats(dst)\n\n # define the criteria to stop and refine the corners\n criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)\n corners = cv2.cornerSubPix(gray, np.float32(centroids), (5, 5), (-1, -1),\n criteria)\n\n # Now draw them\n res = np.hstack((centroids, corners))\n res = np.int0(res)\n img[res[:, 1], res[:, 0]] = [0, 0, 255]\n img[res[:, 3], res[:, 2]] = [0, 255, 0]\n\n cv2.imwrite('test_result.png', img)\n", "sub_path": "python/opencv/cornerSubPix.py", "file_name": "cornerSubPix.py", "file_ext": "py", "file_size_in_byte": 1183, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "cv2.imread", "line_number": 4, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 8, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2GRAY", "line_number": 8, "usage_type": "attribute"}, {"api_name": "numpy.float32", "line_number": 11, "usage_type": "call"}, {"api_name": "cv2.cornerHarris", "line_number": 12, "usage_type": "call"}, {"api_name": "cv2.dilate", "line_number": 14, "usage_type": "call"}, {"api_name": "cv2.threshold", "line_number": 20, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.connectedComponentsWithStats", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.TERM_CRITERIA_EPS", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.TERM_CRITERIA_MAX_ITER", "line_number": 26, "usage_type": "attribute"}, {"api_name": "cv2.cornerSubPix", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.float32", "line_number": 27, "usage_type": "call"}, {"api_name": "numpy.hstack", "line_number": 31, "usage_type": "call"}, {"api_name": "numpy.int0", "line_number": 32, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 36, "usage_type": "call"}]} +{"seq_id": "467048733", "text": "from django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.contrib import auth\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response, render\nfrom django.template import RequestContext\n\nfrom emailusernames.forms import EmailAuthenticationForm\nfrom emailusernames.utils import get_user\n\nfrom 
albatross_api.api.forms import UserCreateForm\nfrom albatross_api.api.models import UserProfile\nfrom albatross_api.payments.models import Subscription\n\n\nLOGIN_ERROR_KEY = 'login_error'\n\n\ndef index(request):\n return render_to_response('website/index.html', {'error': request.session.get(LOGIN_ERROR_KEY, False)}, context_instance=RequestContext(request))\n\n\ndef about(request):\n return render_to_response('website/about.html')\n\n\ndef subscribe(request):\n complete = False\n if request.method == 'POST': # If the form has been submitted...\n form = UserCreateForm(request=request, data=request.POST) # A form bound to the POST data\n if form.is_valid():\n user, message = form.save()\n complete = True\n else:\n form = UserCreateForm(request=request)\n return render_to_response('website/subscribe.html', {'form': form, 'complete': complete}, context_instance=RequestContext(request))\n\n\ndef contact(request):\n return render_to_response('website/contact.html')\n\n\ndef faq(request):\n return render_to_response('website/faq.html')\n\n\ndef login(request):\n if request.method == 'POST':\n form = EmailAuthenticationForm(data=request.POST)\n if form.is_valid():\n emailAddress = form.cleaned_data.get('email')\n user = get_user(emailAddress)\n if request.session.get(LOGIN_ERROR_KEY, False) == True:\n del request.session[LOGIN_ERROR_KEY]\n return HttpResponseRedirect(reverse('albatross_api.website.views.dashboard', args=(user.id,)))\n else:\n request.session[LOGIN_ERROR_KEY] = True\n return HttpResponseRedirect(reverse('albatross_api.website.views.index'))\n else:\n form = EmailAuthenticationForm()\n return render_to_response('website/login.html', {'form': form, 'error': request.session.get(LOGIN_ERROR_KEY, False)}, context_instance=RequestContext(request))\n\n\ndef logout(request):\n auth.logout(request)\n return HttpResponseRedirect(reverse('albatross_api.website.views.index'))\n\n\ndef dashboard(request, id):\n user = User.objects.get(id__exact=id)\n userProfile = UserProfile.objects.get(user__id__exact=user.id)\n subscription = Subscription.objects.filter(profile=userProfile)\n return render_to_response('website/dashboard.html', {\n 'user': user,\n 'profile': userProfile,\n 'subscription': None if len(subscription) == 0 else subscription[0],\n },\n context_instance=RequestContext(request))", "sub_path": "django/Albatross-Technologies/albatross-technologies/albatross_api/website/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 2882, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "django.shortcuts.render_to_response", "line_number": 20, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 20, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 24, "usage_type": "call"}, {"api_name": "albatross_api.api.forms.UserCreateForm", "line_number": 30, "usage_type": "call"}, {"api_name": "albatross_api.api.forms.UserCreateForm", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 36, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 36, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 40, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 44, "usage_type": "call"}, {"api_name": "emailusernames.forms.EmailAuthenticationForm", "line_number": 49, "usage_type": "call"}, 
{"api_name": "emailusernames.utils.get_user", "line_number": 52, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 55, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 55, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 58, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 58, "usage_type": "call"}, {"api_name": "emailusernames.forms.EmailAuthenticationForm", "line_number": 60, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 61, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 61, "usage_type": "call"}, {"api_name": "django.contrib.auth.logout", "line_number": 65, "usage_type": "call"}, {"api_name": "django.contrib.auth", "line_number": 65, "usage_type": "name"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 66, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 66, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects.get", "line_number": 70, "usage_type": "call"}, {"api_name": "django.contrib.auth.models.User.objects", "line_number": 70, "usage_type": "attribute"}, {"api_name": "django.contrib.auth.models.User", "line_number": 70, "usage_type": "name"}, {"api_name": "albatross_api.api.models.UserProfile.objects.get", "line_number": 71, "usage_type": "call"}, {"api_name": "albatross_api.api.models.UserProfile.objects", "line_number": 71, "usage_type": "attribute"}, {"api_name": "albatross_api.api.models.UserProfile", "line_number": 71, "usage_type": "name"}, {"api_name": "albatross_api.payments.models.Subscription.objects.filter", "line_number": 72, "usage_type": "call"}, {"api_name": "albatross_api.payments.models.Subscription.objects", "line_number": 72, "usage_type": "attribute"}, {"api_name": "albatross_api.payments.models.Subscription", "line_number": 72, "usage_type": "name"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 73, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 78, "usage_type": "call"}]} +{"seq_id": "578949923", "text": "import numpy as np\nimport cv2\n\n\ndef access_pixels(image):\n print(image.shape)\n height = image.shape[0]\n width = image.shape[1]\n channels = image.shape[2]\n print(\"width : %s, height : %s, channels : %s\" % (width, height, channels))\n for row in range(height):\n for col in range(width):\n for c in range(channels):\n pv = image[row, col, c]\n image[row, col, c] = 255 - pv # 修改\n cv2.imshow(\"pixels_demo\", image)\n\n\ndef inverse(image):\n dst = cv2.bitwise_not(image)\n cv2.imshow(\"inverse learn\", dst)\n\n\ndef create_image():\n img = np.zeros([400, 400, 3], np.uint8)\n # img = np.zeros([400, 400, 1], np.uint8) # 单通道图片\n # img[:, :, 0] = np.ones([400, 400])*127 # 单通道设置灰度图\n img[:, :, 0] = np.ones([400, 400]) * 255\n # img[:, :, 1] = np.ones([400, 400])*255\n # img[:, :, 2] = np.ones([400, 400])*255\n cv2.imshow(\"new image\", img)\n # cv2.imwrite(\"images/learn.img\")\n\n\nprint(\"---------- Hello OpenCV ----------\")\n# src = cv2.imread(\"images/mask_Crystal.jpg\")\n# cv2.namedWindow(\"input image\", cv2.WINDOW_AUTOSIZE)\n# cv2.imshow(\"input image\", src)\nt1 = cv2.getTickCount()\n# access_pixels(src)\ncreate_image()\n# inverse(src)\n\nt2 = cv2.getTickCount()\ntime = (t2 - t1) / cv2.getTickFrequency()\nprint(\"time : %s ms\" % (time * 
1000))\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n", "sub_path": "006-图像处理/OpenCV/my-demo/结合NumPy进行数组操作.py", "file_name": "结合NumPy进行数组操作.py", "file_ext": "py", "file_size_in_byte": 1381, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "cv2.imshow", "line_number": 16, "usage_type": "call"}, {"api_name": "cv2.bitwise_not", "line_number": 20, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 28, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 39, "usage_type": "call"}, {"api_name": "cv2.getTickCount", "line_number": 44, "usage_type": "call"}, {"api_name": "cv2.getTickFrequency", "line_number": 45, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 47, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "483273810", "text": "import pandas as pd\nimport numpy as np\nimport operator\nimport xgboost as xgb\nfrom sklearn.cross_validation import KFold\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import log_loss\nfrom operator import add\nfrom sklearn.feature_selection import VarianceThreshold\nimport pdb\nfrom hyperopt import STATUS_OK, Trials, fmin, hp, tpe\nfrom sklearn.decomposition import PCA\nimport sys\n\nfrom my_model import MyModel\n\nclass MyXgbModel(MyModel):\n\n def current_params(self):\n params = {\n 'objective': 'binary:logistic',\n 'eval_metric': 'auc',\n 'colsample_bytree': 0.6,\n 'eta': 0.02,\n 'gamma': 0.7,\n 'subsample': 0.6,\n 'max_depth': 3,\n 'silent': 1,\n 'min_child_weight': 5.0,\n 'tree_method': 'exact'\n }\n return(params)\n\n def train_with_params(self, x_train, x_valid, y_train, y_valid, nfold, params):\n d_train = xgb.DMatrix(x_train, label=y_train)\n d_valid = xgb.DMatrix(x_valid, label=y_valid)\n watchlist = [(d_train, 'train'), (d_valid, 'valid')]\n bst = xgb.train(params, d_train, 1000, watchlist, early_stopping_rounds=30, verbose_eval=100)\n return bst\n\n def my_predict_proba(self, bst, d_test): \n p_test = bst.predict(d_test, ntree_limit=bst.best_iteration)\n return p_test\n\n def my_process_test(self,t):\n return xgb.DMatrix(t)\n\n def my_handle_output(self, res):\n return res\n\n def hyperopt_scope(self):\n space = { \n 'eta': hp.quniform('eta', 0.005, 0.1, 0.025),\n 'max_depth': hp.choice('max_depth', [4,5,6,7]),\n 'min_child_weight': hp.quniform('min_child_weight', 1, 6, 1),\n 'subsample': hp.quniform('subsample', 0.5, 1, 0.05),\n 'gamma': hp.quniform('gamma', 0.5, 1, 0.05),\n 'colsample_bytree': hp.quniform('colsample_bytree', 0.5, 1, 0.05),\n 'eval_metric': 'logloss',\n 'objective': 'binary:logistic', \n 'booster': 'gbtree',\n 'tree_method': 'exact',\n 'silent': 1,\n 'seed': 0\n }\n return space", "sub_path": "my_xgb.py", "file_name": "my_xgb.py", "file_ext": "py", "file_size_in_byte": 2049, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "my_model.MyModel", "line_number": 18, "usage_type": "name"}, {"api_name": "xgboost.DMatrix", "line_number": 36, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 37, "usage_type": "call"}, 
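The `access_pixels` loop in the OpenCV entry above inverts the image one channel value at a time, which is exactly the cost its `cv2.getTickCount()` harness measures. A vectorized sketch of the same inversion (assuming `uint8` input, as in the demo):

```python
import numpy as np
import cv2

def inverse_fast(image):
    # Whole-array equivalent of the nested 255 - pv loops; for uint8
    # data this matches cv2.bitwise_not() exactly.
    return 255 - image

img = np.full((400, 400, 3), 127, np.uint8)
assert np.array_equal(inverse_fast(img), cv2.bitwise_not(img))
```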
{"api_name": "xgboost.train", "line_number": 39, "usage_type": "call"}, {"api_name": "xgboost.DMatrix", "line_number": 47, "usage_type": "call"}, {"api_name": "hyperopt.hp.quniform", "line_number": 54, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 54, "usage_type": "name"}, {"api_name": "hyperopt.hp.choice", "line_number": 55, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 55, "usage_type": "name"}, {"api_name": "hyperopt.hp.quniform", "line_number": 56, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 56, "usage_type": "name"}, {"api_name": "hyperopt.hp.quniform", "line_number": 57, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 57, "usage_type": "name"}, {"api_name": "hyperopt.hp.quniform", "line_number": 58, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 58, "usage_type": "name"}, {"api_name": "hyperopt.hp.quniform", "line_number": 59, "usage_type": "call"}, {"api_name": "hyperopt.hp", "line_number": 59, "usage_type": "name"}]} +{"seq_id": "23765891", "text": "import itertools\n\nfrom fsmpy import Transition as T\n\n# Set of finite-state machines describing various\n# parts of AMQP protocol.\n\nclass Connection:\n initial_state = 'disconnected'\n\n states = [\n 'disconnected',\n 'sent_ProtocolHeaderFrame',\n 'received_ConnectionStart',\n 'sent_ConnectionStartOK',\n 'received_ConnectionSecure',\n 'sent_ConnectionSecureOK',\n 'received_ConnectionTune',\n 'sent_ConnectionTuneOK',\n 'sent_ConnectionOpen',\n 'idle',\n 'received_ConnectionClose',\n 'sent_ConnectionClose',\n ]\n transitions = [\n T(event='send_ProtocolHeaderFrame',\n source='disconnected',\n dest='sent_ProtocolHeaderFrame'),\n\n T(event='receive_ProtocolHeaderFrame',\n source='sent_ProtocolHeaderFrame',\n dest='disconnected'),\n\n T(event='receive_ConnectionStart',\n source='sent_ProtocolHeaderFrame',\n dest='received_ConnectionStart'),\n\n T(event='send_ConnectionStartOK',\n source='received_ConnectionStart',\n dest='sent_ConnectionStartOK'),\n\n T(event='receive_ConnectionSecure',\n source='sent_ConnectionStartOK',\n dest='received_ConnectionSecure'),\n\n T(event='send_ConnectionSecureOK',\n source='received_ConnectionSecure',\n dest='sent_ConnectionSecureOK'),\n\n T(event='receive_ConnectionTune',\n source='sent_ConnectionSecureOK',\n dest='received_ConnectionTune'),\n\n T(event='send_ConnectionTuneOK',\n source='received_ConnectionTune',\n dest='sent_ConnectionTuneOK'),\n\n T(event='receive_ConnectionTune',\n source='sent_ConnectionStartOK',\n dest='received_ConnectionTune'),\n\n T(event='send_ConnectionTuneOK',\n source='received_ConnectionTune',\n dest='sent_ConnectionTuneOK'),\n\n T(event='send_ConnectionOpen',\n source='sent_ConnectionTuneOK',\n dest='sent_ConnectionOpen'),\n\n T(event='receive_ConnectionOpenOK',\n source='sent_ConnectionOpen',\n dest='idle'),\n\n T(event='receive_ConnectionClose',\n source='.*',\n dest='received_ConnectionClose'),\n\n T(event='send_ConnectionCloseOK',\n source='received_ConnectionClose',\n dest='disconnected'),\n\n T(event='send_ConnectionClose',\n source='.*',\n dest='sent_ConnectionClose'),\n\n T(event='receive_ConnectionCloseOK',\n source='sent_ConnectionClose',\n dest='disconnected'),\n ]\n\n\nclass Exchange:\n states = [\n 'sent_ExchangeDeclare',\n 'sent_ExchangeBind',\n 'sent_ExchangeUnbind',\n ]\n\n transitions = [\n T(event='send_ExchangeDeclare',\n source='channel_idle',\n dest='sent_ExchangeDeclare'),\n\n T(event='receive_ExchangeDeclareOK',\n source='sent_ExchangeDeclare',\n 
dest='channel_idle'),\n\n T(event='receive_ExchangeDeclare_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_ExchangeBind',\n source='channel_idle',\n dest='sent_ExchangeBind'),\n\n T(event='receive_ExchangeBindOK',\n source='sent_ExchangeBind',\n dest='channel_idle'),\n\n T(event='send_ExchangeBind_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_ExchangeUnbind',\n source='channel_idle',\n dest='sent_ExchangeUnbind'),\n\n T(event='receive_ExchangeUnbindOK',\n source='sent_ExchangeUnbind',\n dest='channel_idle'),\n\n T(event='send_ExchangeUnbind_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_ExchangeDelete',\n source='channel_idle',\n dest='sent_ExchangeDelete'),\n\n T(event='receive_ExchangeDeleteOK',\n source='sent_ExchangeDelete',\n dest='channel_idle'),\n\n T(event='send_ExchangeDelete_nowait',\n source='channel_idle',\n dest='channel_idle'),\n ]\n\n\nclass Queue:\n states = [\n 'sent_QueueDeclare',\n 'sent_QueueBind',\n 'sent_QueueUnbind',\n 'sent_QueuePurge',\n 'sent_QueueDelete',\n ]\n\n transitions = [\n T(event='send_QueueDeclare',\n source='channel_idle',\n dest='sent_QueueDeclare'),\n\n T(event='receive_QueueDeclareOK',\n source='sent_QueueDeclare',\n dest='channel_idle'),\n\n T(event='send_QueueDeclare_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_QueueBind',\n source='channel_idle',\n dest='sent_QueueBind'),\n\n T(event='receive_QueueBindOK',\n source='sent_QueueBind',\n dest='channel_idle'),\n\n T(event='send_QueueBind_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_QueueUnbind',\n source='channel_idle',\n dest='sent_QueueUnbind'),\n\n T(event='receive_QueueUnbindOK',\n source='sent_QueueUnbind',\n dest='channel_idle'),\n\n T(event='send_QueuePurge',\n source='channel_idle',\n dest='sent_QueuePurge'),\n\n T(event='receive_QueuePurgeOK',\n source='sent_QueuePurge',\n dest='channel_idle'),\n\n T(event='send_QueuePurge_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_QueueDelete',\n source='channel_idle',\n dest='sent_QueueDelete'),\n\n T(event='receive_QueueDeleteOK',\n source='sent_QueueDelete',\n dest='channel_idle'),\n\n T(event='send_QueueDelete_nowait',\n source='channel_idle',\n dest='channel_idle'),\n ]\n\n\nclass Basic:\n states = [\n 'sent_BasicQos',\n 'sent_BasicConsume',\n 'sent_BasicCancel',\n 'sent_BasicGet',\n 'sent_BasicRecover',\n ]\n\n transitions = [\n T(event='send_BasicQos',\n source='channel_idle',\n dest='sent_BasicQos'),\n\n T(event='receive_BasicQosOK',\n source='sent_BasicQos',\n dest='channel_idle'),\n\n T(event='send_BasicConsume',\n source='channel_idle',\n dest='sent_BasicConsume'),\n\n T(event='receive_BasicConsumeOK',\n source='sent_BasicConsume',\n dest='channel_idle'),\n\n T(event='send_BasicConsume_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicCancel',\n source='channel_idle',\n dest='sent_BasicCancel'),\n\n T(event='receive_BasicCancelOK',\n source='sent_BasicCancel',\n dest='channel_idle'),\n\n T(event='send_BasicCancel_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicPublish',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='receive_BasicReturn',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='receive_BasicDeliver',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicGet',\n source='channel_idle',\n dest='sent_BasicGet'),\n\n T(event='receive_BasicGetOK',\n 
source='sent_BasicGet',\n dest='channel_idle'),\n\n T(event='receive_BasicGetEmpty',\n source='sent_BasicGet',\n dest='channel_idle'),\n\n T(event='send_BasicAck',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='receive_BasicAck',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicReject',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicRecoverAsync',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='send_BasicRecover',\n source='channel_idle',\n dest='sent_BasicRecover'),\n\n T(event='receive_BasicRecoverOK',\n source='sent_BasicRecover',\n dest='channel_idle'),\n\n T(event='send_BasicNack',\n source='channel_idle',\n dest='channel_idle'),\n\n T(event='receive_BasicNack',\n source='channel_idle',\n dest='channel_idle'),\n ]\n\n\nclass Tx:\n states = [\n 'sent_TxSelect',\n 'sent_TxCommit',\n 'sent_TxRollback',\n ]\n\n transitions = [\n T(event='send_TxSelect',\n # A channel can't start a transaction if it's already\n # holding a transaction or is in confirmation mode.\n source='channel_idle',\n dest='sent_TxSelect'),\n\n T(event='receive_TxSelectOK',\n source='sent_TxSelect',\n dest='channel_idle_tx'),\n\n T(event='send_TxCommit',\n # A commit may only happen in a channel holding a transaction.\n source='channel_idle_tx',\n dest='sent_TxCommit'),\n\n T(event='receive_TxCommitOK',\n source='sent_TxCommit',\n dest='channel_idle'),\n\n T(event='send_TxRollback',\n # A rollback may only happen in a channel holding a transaction.\n source='channel_idle_tx',\n dest='sent_TxRollback'),\n\n T(event='receive_TxRollbackOK',\n source='sent_TxRollback',\n dest='channel_idle'),\n ]\n\n\nclass Confirm:\n states = [\n 'sent_ConfirmSelect',\n ]\n\n transitions = [\n T(event='send_ConfirmSelect',\n # A channel can't be selected to work in confirmation mode\n # if it's already in confirmation mode or a transaction is active.\n source='channel_idle',\n dest='sent_ConfirmSelect'),\n\n T(event='receive_ConfirmSelectOK',\n source='sent_ConfirmSelect',\n dest='channel_idle_confirm'),\n\n T(event='send_ConfirmSelect_nowait',\n source='channel_idle',\n dest='channel_idle_confirm'),\n\n # If confirm mode is enabled, BasicPublish should wait for BasicAck.\n T(event='send_BasicPublish',\n source='channel_idle_confirm',\n dest='sent_BasicPublish_confirm'),\n\n T(event='receive_BasicAck',\n source='sent_BasicPublish_confirm',\n dest='channel_idle_confirm'),\n ]\n\n\nclass ChannelFraming:\n initial_state = 'channel_idle'\n\n states = [\n 'channel_idle',\n 'sent_MethodFrame',\n 'sent_MethodFrame_content',\n 'received_MethodFrame',\n 'received_MethodFrame_content',\n 'sent_ContentHeaderFrame',\n 'received_ContentHeaderFrame',\n ]\n\n transitions = [\n # Client sends something to the server\n T(event='send_MethodFrame',\n source='channel_idle',\n dest='sent_MethodFrame'),\n\n # and receives its response.\n T(event='receive_MethodFrame',\n source='sent_MethodFrame',\n dest='channel_idle'),\n\n # Client sends something asynchronously.\n T(event='send_MethodFrame_nowait',\n source='channel_idle',\n dest='channel_idle'),\n\n # Server sends something to the client\n T(event='receive_MethodFrame',\n source='channel_idle',\n dest='received_MethodFrame'),\n\n # and receives the client's response.\n T(event='send_MethodFrame',\n source='received_MethodFrame',\n dest='channel_idle'),\n\n T(event='send_MethodFrame_content',\n source='channel_idle',\n dest='sent_MethodFrame_content'),\n\n T(event='receive_MethodFrame_content',\n source='channel_idle',\n 
dest='received_MethodFrame_content'),\n\n T(event='receive_MethodFrame_content',\n source='sent_MethodFrame',\n dest='received_MethodFrame_content'),\n\n # If ContentHeader comes after MethodFrame,\n T(event='receive_ContentHeaderFrame',\n source='received_MethodFrame_content',\n dest='received_ContentHeaderFrame'),\n\n # zero or more ContentBody frames can arrive.\n T(event='receive_ContentBodyFrame',\n source='received_ContentHeaderFrame',\n dest='received_ContentHeaderFrame'),\n\n # If a MethodFrame arrives, treat this as the end of the body.\n T(event='receive_MethodFrame',\n source='received_ContentHeaderFrame',\n dest='received_MethodFrame'),\n\n T(event='receive_MethodFrame_content',\n source='received_ContentHeaderFrame',\n dest='received_MethodFrame_content'),\n\n T(event='send_MethodFrame',\n source='received_ContentHeaderFrame',\n dest='sent_MethodFrame'),\n\n T(event='send_MethodFrame_content',\n source='received_ContentHeaderFrame',\n dest='sent_MethodFrame_content'),\n\n # Same for the client.\n # If ContentHeader comes after MethodFrame,\n T(event='send_ContentHeaderFrame',\n source='sent_MethodFrame_content',\n dest='sent_ContentHeaderFrame'),\n\n # zero or more ContentBody frames can be sent.\n T(event='send_ContentBodyFrame',\n source='sent_ContentHeaderFrame',\n dest='sent_ContentHeaderFrame'),\n\n # If a MethodFrame is sent, treat this as the end of the body.\n T(event='send_MethodFrame',\n source='sent_ContentHeaderFrame',\n dest='sent_MethodFrame'),\n\n T(event='send_MethodFrame_content',\n source='send_MethodFrame',\n dest='sent_MethodFrame_content'),\n\n T(event='send_MethodFrame_content',\n source='sent_ContentHeaderFrame',\n dest='sent_MethodFrame_content'),\n\n # Don't forget about asynchronous methods.\n T(event='send_MethodFrame_nowait',\n source='sent_ContentHeaderFrame',\n dest='channel_idle'),\n\n T(event='send_MethodFrame',\n source='sent_MethodFrame',\n dest='sent_MethodFrame'),\n ]\n\n\nclass Channel:\n initial_state = 'channel_disconnected'\n\n states = ([\n 'channel_disconnected',\n 'channel_idle',\n 'channel_idle_tx',\n 'channel_idle_confirm',\n 'sent_ChannelOpen',\n 'sent_ChannelClose',\n 'received_ChannelClose',\n 'sent_ChannelFlow',\n 'received_ChannelFlow'] +\n Exchange.states +\n Queue.states +\n Basic.states +\n Confirm.states +\n # Don't forget to add transaction-aware states\n [state + '_tx'\n for state in itertools.chain(\n Exchange.states,\n Queue.states,\n Basic.states)] +\n # Don't forget to add confirm-aware states\n [state + '_confirm'\n for state in itertools.chain(\n Exchange.states,\n Queue.states,\n Basic.states)] +\n Tx.states)\n\n transitions = ([\n T(event='send_ChannelOpen',\n source='channel_disconnected',\n dest='sent_ChannelOpen'),\n\n T(event='receive_ChannelOpenOK',\n source='sent_ChannelOpen',\n dest='channel_idle'),\n\n T(event='send_ChannelClose',\n source='.*',\n dest='sent_ChannelClose'),\n\n T(event='receive_ChannelCloseOK',\n source='sent_ChannelClose',\n dest='channel_disconnected'),\n\n T(event='receive_ChannelClose',\n source='.*',\n dest='received_ChannelClose'),\n\n T(event='send_ChannelCloseOK',\n source='received_ChannelClose',\n dest='channel_disconnected'),\n\n T(event='send_ChannelFlow',\n source='channel_idle',\n dest='sent_ChannelFlow'),\n\n T(event='receive_ChannelFlowOK',\n source='sent_ChannelFlow',\n dest='channel_idle'),\n\n T(event='receive_ChannelFlow',\n source='channel_idle',\n dest='received_ChannelFlow'),\n\n T(event='send_ChannelFlowOK',\n source='received_ChannelFlow',\n dest='channel_idle'),\n 
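The `fsmpy` transition tables in this entry are pure data: each `T(event=..., source=..., dest=...)` row only pairs an event with a source-state pattern and a destination. `fsmpy` itself is not shown here, so the following driver is a hypothetical sketch of how such rows resolve, including the `'.*'` wildcard sources used by `Connection` and `Channel`:

```python
import re
from collections import namedtuple

T = namedtuple('T', ['event', 'source', 'dest'])

def step(state, event, transitions):
    # The first row whose event matches and whose source regex matches
    # the current state wins; sources like '.*' act as wildcards.
    for t in transitions:
        if t.event == event and re.fullmatch(t.source, state):
            return t.dest
    raise ValueError('no transition for %r in state %r' % (event, state))

table = [
    T('send_ChannelOpen', 'channel_disconnected', 'sent_ChannelOpen'),
    T('receive_ChannelClose', '.*', 'received_ChannelClose'),
]
assert step('channel_disconnected', 'send_ChannelOpen', table) == 'sent_ChannelOpen'
assert step('sent_ChannelOpen', 'receive_ChannelClose', table) == 'received_ChannelClose'
```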
] + Exchange.transitions +\n Queue.transitions +\n Basic.transitions +\n Confirm.transitions +\n # Don't forget to add transaction-aware transitions\n [T(event=t.event, source=t.source + '_tx',\n dest=t.dest+'_tx')\n for t in itertools.chain(\n Exchange.transitions,\n Queue.transitions,\n Basic.transitions)] +\n # Don't forget to add confirm-aware transitions\n [T(event=t.event, source=t.source + '_confirm',\n dest=t.dest+'_confirm')\n for t in itertools.chain(\n Exchange.transitions,\n Queue.transitions,\n Basic.transitions)] +\n Tx.transitions)\n", "sub_path": "amqproto/fsm.py", "file_name": "fsm.py", "file_ext": "py", "file_size_in_byte": 16980, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "fsmpy.Transition", "line_number": 26, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 30, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 34, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 38, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 42, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 46, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 50, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 54, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 58, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 62, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 66, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 70, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 74, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 78, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 82, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 86, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 100, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 104, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 108, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 112, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 116, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 120, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 124, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 128, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 132, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 136, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 140, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 144, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 160, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 164, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 168, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 172, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 176, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 180, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 184, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 188, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 192, "usage_type": 
"call"}, {"api_name": "fsmpy.Transition", "line_number": 196, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 200, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 204, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 208, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 212, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 228, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 232, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 236, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 240, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 244, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 248, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 252, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 256, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 260, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 264, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 268, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 272, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 276, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 280, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 284, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 288, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 292, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 296, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 300, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 304, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 308, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 312, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 326, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 332, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 336, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 341, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 345, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 350, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 362, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 368, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 372, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 377, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 381, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 402, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 407, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 412, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 417, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 422, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 426, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 430, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 434, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 
439, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 444, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 449, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 453, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 457, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 461, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 467, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 472, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 477, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 481, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 485, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 490, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 494, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 519, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 525, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 532, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 536, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 540, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 544, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 548, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 552, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 556, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 560, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 564, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 568, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 576, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 578, "usage_type": "call"}, {"api_name": "fsmpy.Transition", "line_number": 583, "usage_type": "call"}, {"api_name": "itertools.chain", "line_number": 585, "usage_type": "call"}]} +{"seq_id": "147849835", "text": "import random\r\nimport numpy as np\r\nimport copy\r\nfrom math import sin, cos, e\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import stats\r\n\r\n\r\ndef main_quasi_newton_raphson(f):\r\n # f = lambda x,y: eval(\"sin(1/2*x**2 - 1/4*y**2 + 3)*cos(2*x+1-e**y)\") - example of f\r\n\r\n x1 = random.uniform(-1, 1)\r\n x2 = random.uniform(-1, 1)\r\n x = np.array([x1, x2])\r\n qnr = quasiNewtonRaphson()\r\n qnr.calculate(f, x)\r\n qnr.getStepsCount()\r\n\r\n sX, sY = qnr.getSteps()\r\n\r\n sZ = list(map(f, sX, sY))\r\n return sX, sY, sZ\r\n\r\n\r\n\r\nclass quasiNewtonRaphson:\r\n stepsCount = 0 # initiate steps count as 0 and empty arrays for X and Y\r\n stepsX = []\r\n stepsY = []\r\n def calculate(self, f, x, tol=0.01, maxiter=100):\r\n self.stepsCount = 0\r\n self.stepsX = [x[0]] # assign first values\r\n self.stepsY = [x[1]]\r\n B = approx_update_hessian(f, x) # approx. 
first matrices\r\n g = approx_gradient(f, x)\r\n\r\n for i in range(1, maxiter):\r\n try:\r\n\r\n xnew = x - np.dot(B, g) # quasi-Newton step: subtract the inverse-Hessian/gradient product from the current point\r\n xold = np.array([self.stepsX[-1], self.stepsY[-1]])\r\n self.stepsX.append(xnew[0])\r\n self.stepsY.append(xnew[1])\r\n gold = copy.deepcopy(g) # gold - old gradient\r\n g = approx_gradient(f, xnew) \r\n B = approx_update_hessian(f, x) \r\n\r\n\r\n if abs(g[0]) < tol and abs(g[1]) < tol: # if below tolerance no need to pursue further gradient descent\r\n break \r\n\r\n except np.linalg.LinAlgError: # anticipate numpy-related error\r\n print(\"Encountered LinAlgError - breaking\")\r\n break\r\n x = xnew\r\n \r\n self.stepsCount = i+1\r\n return xnew\r\n\r\n def getStepsCount(self): # getter methods\r\n return self.stepsCount\r\n\r\n def getSteps(self): \r\n return self.stepsX, self.stepsY\r\n \r\n \r\n\r\ndef approx_gradient(f, v): # from definition of derivative - further explained in documentation\r\n a = 0.01\r\n g1 = (f(x = v[0] + a, y = v[1]) - f(x = v[0] - a, y = v[1])) / (2 * a)\r\n g2 = (f(x = v[0], y = v[1] + a) - f(x = v[0], y = v[1] - a)) / (2 * a)\r\n return np.array([g1, g2])\r\n\r\ndef approx_update_hessian(f, x):\r\n h = 0.01\r\n ex = np.array([h, 0])\r\n ey = np.array([0, h])\r\n H = np.ones([2, 2])\r\n H[0][0], H[1][0] = (approx_gradient(f, x + ex) - approx_gradient(f, x)) / h\r\n H[0][1], H[1][1] = (approx_gradient(f, x + ey) - approx_gradient(f, x)) / h\r\n return np.linalg.inv(H)\r\n\r\n\r\n\r\n\r\ndef generate_plot(f, sX, sY):\r\n X = np.arange(-1*abs(3*min(sX)), abs(3*max(sX)), 0.01) # generate array of points\r\n Y = np.arange(-1*abs(3*min(sY)), abs(3*min(sY)), 0.01)\r\n\r\n X, Y = np.meshgrid(X, Y) # calculate meshgrid of both\r\n Z = f(X, Y)\r\n\r\n plt.figure(figsize=(8, 8)) # visual parameters\r\n\r\n plt.grid(False)\r\n\r\n plt.contour(X, Y, Z, np.linspace(-1, 1, 15), cmap='Spectral') # create contours\r\n\r\n plt.xlabel('x')\r\n plt.ylabel('y')\r\n\r\n\r\n\r\n for i in range(1, len(sX)): # draw arrows connecting steps\r\n plt.arrow(x=sX[i-1], y=sY[i-1], dx=sX[i]-sX[i-1], dy=sY[i]-sY[i-1], width=0.01, zorder=100, color=\"k\")\r\n\r\n\r\n plt.tight_layout()\r\n plt.colorbar()\r\n plt.show() # show plot\r\n\r\n\r\n\r\n# example test \r\nf = lambda x,y: np.sin(1/2*x**2 - 1/4*y**2 + 3)*np.cos(2*x+1-e**y) #example of f\r\nsX, sY, sZ = main_quasi_newton_raphson(f)\r\ngenerate_plot(f, sX, sY)\r\n\r\n# results = []\r\n# # performing tests\r\n# for i in range(25):\r\n# sx, sy, sz = main_quasi_newton_raphson(f)\r\n# results.append(min(sz))\r\n\r\n# variancy = np.var(results)\r\n# # median absolute deviation\r\n# mad = stats.median_abs_deviation(results)\r\n\r\n# print(mad)\r\n# # print(abs(-1 - np.average(results)))", "sub_path": "asgor/quasi_newton.py", "file_name": "quasi_newton.py", "file_ext": "py", "file_size_in_byte": 4005, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "random.uniform", "line_number": 13, "usage_type": "call"}, {"api_name": "random.uniform", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.dot", "line_number": 41, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 42, "usage_type": "call"}, {"api_name": "copy.deepcopy", "line_number": 45, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 53, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 
73, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 77, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 78, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 79, "usage_type": "call"}, {"api_name": "numpy.linalg.inv", "line_number": 82, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 82, "usage_type": "attribute"}, {"api_name": "numpy.arange", "line_number": 88, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 89, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 91, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 94, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 94, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 96, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 96, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.contour", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 98, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 98, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 100, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 100, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 101, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 101, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.arrow", "line_number": 106, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 106, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 109, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 109, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.colorbar", "line_number": 110, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 110, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 111, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 111, "usage_type": "name"}, {"api_name": "numpy.sin", "line_number": 116, "usage_type": "call"}, {"api_name": "numpy.cos", "line_number": 116, "usage_type": "call"}, {"api_name": "math.e", "line_number": 116, "usage_type": "name"}]} +{"seq_id": "365443842", "text": "from cgitb import text\nfrom astropy.io import fits\nfrom matplotlib import transforms\nimport matplotlib.pyplot as plt\nimport os\nfrom numpy.lib.utils import source\nimport pandas as pd\nimport numpy as np\nfrom astropy.wcs import WCS\nimport astropy.coordinates as coord\nfrom astropy import units as u\nimport pyregion\nfrom pvextractor import Path\nfrom pvextractor import extract_pv_slice\nfrom matplotlib import colors\nfrom matplotlib.colors import LogNorm\nimport astropy.units as u\n\nfrom astropy.visualization import (MinMaxInterval, SqrtStretch,\n\t\t\t\t\t\t\t\t ImageNormalize)\n\nfrom matplotlib.patches import Ellipse\nfrom mpl_toolkits.axes_grid1.anchored_artists import AnchoredAuxTransformBox\nfrom mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar\n\ndef header_editor(header):\n\theader.remove('CTYPE3')\n\theader.remove('CRVAL3')\n\theader.remove('CDELT3')\n\theader.remove('CRPIX3')\n\theader.remove('CTYPE4')\n\theader.remove('CRVAL4')\n\theader.remove('CDELT4')\n\theader.remove('CRPIX4')\n\theader.remove('NAXIS3')\n\theader.update(NAXIS=2)\n\theader.remove('NAXIS4')\n\treturn 0\n\ndef axes_setting(ax, show = False, datashape = (512, 512), pixsize = 
0.2):\n\t\"\"\"\n\tSet axes for combined maps\n\tpixsize: unit arcsec\n\t\"\"\"\n\tlon = ax.coords[0]\n\tlat = ax.coords[1]\n\t# lon.set_ticklabel_visible(False)\n\t# lat.set_ticklabel_visible(False)\n\n\tlon.set_ticks(spacing=10. * u.arcsec)\n\tlat.set_ticks(spacing=10. * u.arcsec)\n\n\tif not(show):\n\t\tlon.set_axislabel(' ', minpad = 0.1)\n\t\tlat.set_axislabel(' ', minpad = 0.1)\n\t\t# lon.set_ticklabel_visible(False)\n\t\t# lat.set_ticklabel_visible(False)\n\telse:\n\t\t# print(1)\n\t\tlon.set_axislabel('RA', minpad = 0.1)\n\t\tlat.set_axislabel('DEC', minpad = 0.1)\n\t\n\tlon.set_ticks(number=4)\t\n\tlat.set_ticks(number=4)\n\tlon.display_minor_ticks(True)\n\tlat.display_minor_ticks(True)\n\t\t# lon.set_ticklabel_visible(False)\n\t\t# lat.set_ticklabel_visible(False)\n\t\t# lon.set_ticklabel_visible(False)\n\ndef Add_beam_size(ax, contin_header):\n\t\"\"\"\n\tSet beam size and scale bar\n\t\"\"\"\n\t#Add beam sizes\n\tbox = AnchoredAuxTransformBox(ax.transData, loc='lower left')\n\t# Read beam info\n\ttry:\n\t\tbeam_major_deg, beam_minor_deg, beam_angle_deg = contin_header[\"BMAJ\"], contin_header[\"BMIN\"], contin_header[\"BPA\"] # in unit degree\\\n\texcept KeyError:\n\t\tbeam_major_deg, beam_minor_deg, beam_angle_deg = 4.85e-4, 3.69e-4, -85.25\n\t\n\tbeam_major, beam_minor = beam_major_deg*3600, beam_minor_deg*3600 # in unit arcsec\n\t# print(beam_major)\n\n\tpix_scale = contin_header['CDELT1']*3600 # Convert to arcsec\n\n\tel = Ellipse((0, 0), width = beam_minor/pix_scale, height = beam_major/pix_scale, angle = beam_angle_deg, facecolor = 'none', edgecolor = 'black')\n\tbox.drawing_area.add_artist(el)\n\t\n\tax.add_artist(box)\n\n\tpixel_length = (pix_scale*distance*u.au).to(u.pc) # in unit AU\n\tset_scale = 0.1*u.pc # in unit AU\n\tscalebar = AnchoredSizeBar(ax.transData,\n\t\t\t\t\t\t\t(set_scale/pixel_length).value, '%.1f pc'%set_scale.value, 'lower right', \n\t\t\t\t\t\t\tpad=0.1,\n\t\t\t\t\t\t\tcolor='black',\n\t\t\t\t\t\t\tframeon=False,\n\t\t\t\t\t\t\tsize_vertical=0.8)\n\tax.add_artist(scalebar)\n\n\treturn 0\n\n\ndef condenpos(conden_info, contin_wcs):\n\t\"\"\"\n\tGet the x-y position of target condensation\n\t\"\"\"\n\tspatial_ra = conden_info.iloc[0][\"Ra\"]\n\tspatial_dec = conden_info.iloc[0][\"Dec\"]\n\t\t\n\tradius = round(float(conden_info['Size_FWHM'])/distance/np.pi*180*3600/2, 4)\n\n\tregionx, regiony = round(float(spatial_ra), 4), round(float(spatial_dec), 4)\n\n\txcen_raw, ycen_raw = contin_wcs.all_world2pix(regionx, regiony, 1)\n\txcen, ycen = xcen_raw-1, ycen_raw-1\n\t\n\treturn xcen, ycen\n\n\ndef plot_cont_map_part(source_list, fig, row, col):\n\t\"\"\"\n\tPlot the continuum emission partially\n\t\"\"\"\n\tfor source_name, subplot_index in zip(source_list, range(len(source_list))):\n\t# Read continuum data\n\t\tsource_info= sample_info[sample_info.source_name == source_name]\n\t\tcontin_file_name = source_info.iloc[0][\"continuum_file\"]\n\t\tcontin_data, contin_header = fits.getdata(contin_path+contin_file_name, header = True)\n\n\t\theader_editor(contin_header)\n\t\tcontin_wcs = WCS(contin_header)\n\t\t#####################################################\n\t\t# Set axes for outflow maps\n\t\tax = fig.add_subplot(row, col, subplot_index+1, projection = contin_wcs)\n\t\n\t\tif subplot_index != col*(row-1):\n\t\t\taxes_setting(ax)\n\t\t# Set relative coordinate ticks\n\t\telif subplot_index == col*(row-1):\n\t\t\taxes_setting(ax, show = True, datashape = contin_data[0,0].shape, pixsize = 0.2)\n\t\t\n\t\t# Zoom in\n\t\txmax, ymax = contin_data[0, 
0].shape\n\t\tif not(\"DR\" in source_name):\n\t\t\tax.set_xlim(0.2*xmax, 0.8*xmax)\n\t\t\tax.set_ylim(0.2*ymax, 0.8*ymax)\n\n\t\t# Plot SMA 1.3 mm continuum map\n\t\tcontinuum_max, continuum_min = np.nanmax(contin_data), np.nanmin(contin_data)\n\t\t# Create interval object\n\t\tinterval = MinMaxInterval()\n\t\tvmin, vmax = interval.get_limits(contin_data[0, 0])\n\t\tnorm = ImageNormalize(vmin=vmin, vmax=vmax, stretch=SqrtStretch())\n\n\t\t# Add beam size\n\t\tAdd_beam_size(ax, contin_header=contin_header)\n\n\t\t# Create an ImageNormalize object using a SqrtStretch object\n\t\tax.imshow(contin_data[0,0], transform = ax.get_transform(contin_wcs), cmap = plt.cm.gray_r, norm=norm)\n\n\t\t# Continuum contour\n\t\tcontin_sigma = np.nanstd(contin_data[0,0])/2\n\t\tcontin_levels = [contin_sigma*i for i in [-4, 3, 6, 9, 12, 18, 24, 36, 48, 60]]\n\t\tax.contour(contin_data[0, 0], levels = contin_levels, colors = 'black', linewidths = 0.5)\n\n\t\t# Annotate condensation name\n\t\ttarget_name = list(filter(lambda x: source_info.iloc[0][\"other_name\"] in x, conden_cata[\"Name_core\"]))\n\n\t\tconden_info_list = [conden_cata[conden_cata.Name_core == cond_name] for cond_name in target_name]\n\n\t\tconden_pos_list = [condenpos(conden_info, contin_wcs) for conden_info in conden_info_list]\n\n\t\t# Plot condensation name\n\t\tfor conden_pos, conden_index in zip(conden_pos_list, range(len(conden_pos_list))):\n\t\t\t# ax.text(conden_pos[0], conden_pos[1], \"MM%d\"%(conden_index+1), size= 6)\n\t\t\tax.scatter(conden_pos[0], conden_pos[1], marker = \"+\", color = \"red\", s = 16, linewidth = 0.5)\n\t\t\t# if conden_pos[0]>xlim_min and conden_pos[0]<xlim_max and conden_pos[1]>ylim_min and conden_pos[1]<ylim_max:\n while len(sorted_box_indices) > 0:\n last = len(sorted_box_indices) - 1\n i = sorted_box_indices[last]\n selected_indices.append(i)\n box = [x_min[i], y_min[i], x_max[i], y_max[i]]\n box = np.asarray(box)\n #print(box.shape)\n #print(x_min[sorted_box_indices[:last]].shape)\n #print(y_max[sorted_box_indices[:last]].shape)\n #print(x_min[sorted_box_indices[:last]].shape)\n #print(y_max[sorted_box_indices[:last]].shape)\n test_boxes = [x_min[sorted_box_indices[:last], None],\n y_min[sorted_box_indices[:last], None],\n x_max[sorted_box_indices[:last], None],\n y_max[sorted_box_indices[:last], None]]\n #boxes = np.asarray(boxes)\n test_boxes = np.concatenate(test_boxes, axis=-1)\n #print(boxes.shape)\n iou = self._calculate_intersection_over_unions(box, test_boxes)\n #xx1 = np.maximum(x_min[i], x_min[idxs[:last]])\n #yy1 = np.maximum(y_min[i], y_min[idxs[:last]])\n #xx2 = np.minimum(x_max[i], x_max[idxs[:last]])\n #yy2 = np.minimum(y_max[i], y_max[idxs[:last]])\n #width = np.maximum(0, xx2 - xx1)\n #height = np.maximum(0, yy2 - yy1)\n #overlap = (width * height) / area[idxs[:last]]\n \"\"\" Here I can include another condition in the np.where\n in order to delete if and only if the boxes are of the\n same class.\n \"\"\"\n current_class = np.argmax(classes[i])\n box_classes = np.argmax(classes[sorted_box_indices[:last]], axis=-1)\n class_mask = current_class == box_classes\n #print(class_mask)\n #print(overlap)\n overlap_mask = iou > iou_threshold\n #print(overlap_mask)\n delete_mask = np.logical_and(overlap_mask, class_mask)\n sorted_box_indices = np.delete(sorted_box_indices, np.concatenate(([last],\n np.where(delete_mask)[0])))\n return boxes[selected_indices]\n\n\n def _decode_boxes(self, predicted_boxes):\n prior_x_min = self.prior_boxes[:, 0]\n prior_y_min = self.prior_boxes[:, 1]\n prior_x_max = self.prior_boxes[:, 2]\n prior_y_max = self.prior_boxes[:, 3]\n\n
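`apply_non_max_suppression_fast` above delegates the overlap test to `_calculate_intersection_over_unions`, whose body is outside this excerpt. A standalone sketch of the standard IoU formula it presumably implements, with boxes laid out as `[x_min, y_min, x_max, y_max]` rows as elsewhere in the class:

```python
import numpy as np

def intersection_over_union(box, boxes):
    # Intersection rectangle; clip negative widths/heights to zero so
    # disjoint boxes score 0 instead of going negative.
    x_min = np.maximum(box[0], boxes[:, 0])
    y_min = np.maximum(box[1], boxes[:, 1])
    x_max = np.minimum(box[2], boxes[:, 2])
    y_max = np.minimum(box[3], boxes[:, 3])
    intersection = np.maximum(0, x_max - x_min) * np.maximum(0, y_max - y_min)
    box_area = (box[2] - box[0]) * (box[3] - box[1])
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return intersection / (box_area + areas - intersection)

print(intersection_over_union(np.array([0., 0., 2., 2.]),
                              np.array([[1., 1., 3., 3.]])))  # [0.14285714]
```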
prior_width = prior_x_max - prior_x_min\n prior_height = prior_y_max - prior_y_min\n prior_center_x = 0.5 * (prior_x_max + prior_x_min)\n prior_center_y = 0.5 * (prior_y_max + prior_y_min)\n\n # TODO rename to g_hat_center_x all the other variables \n pred_center_x = predicted_boxes[:, 0]\n pred_center_y = predicted_boxes[:, 1]\n pred_width = predicted_boxes[:, 2]\n pred_height = predicted_boxes[:, 3]\n\n scale_center_x = self.box_scale_factors[0]\n scale_center_y = self.box_scale_factors[1]\n scale_width = self.box_scale_factors[2]\n scale_height = self.box_scale_factors[3]\n\n decoded_center_x = pred_center_x * prior_width * scale_center_x\n decoded_center_x = decoded_center_x + prior_center_x\n decoded_center_y = pred_center_y * prior_height * scale_center_y\n decoded_center_y = decoded_center_y + prior_center_y\n\n decoded_width = np.exp(pred_width * scale_width)\n decoded_width = decoded_width * prior_width\n decoded_height = np.exp(pred_height * scale_height)\n decoded_height = decoded_height * prior_height\n\n decoded_x_min = decoded_center_x - (0.5 * decoded_width)\n decoded_y_min = decoded_center_y - (0.5 * decoded_height)\n decoded_x_max = decoded_center_x + (0.5 * decoded_width)\n decoded_y_max = decoded_center_y + (0.5 * decoded_height)\n\n decoded_boxes = np.concatenate((decoded_x_min[:, None],\n decoded_y_min[:, None],\n decoded_x_max[:, None],\n decoded_y_max[:, None]), axis=-1)\n decoded_boxes = np.clip(decoded_boxes, 0.0, 1.0)\n if predicted_boxes.shape[1] > 4:\n decoded_boxes = np.concatenate([decoded_boxes,\n predicted_boxes[:, 4:]], axis=-1)\n return decoded_boxes\n\n def _denormalize_box(self, box_coordinates, image_size):\n x_min = box_coordinates[:, 0]\n y_min = box_coordinates[:, 1]\n x_max = box_coordinates[:, 2]\n y_max = box_coordinates[:, 3]\n original_image_width, original_image_height = image_size\n x_min = x_min * original_image_width\n y_min = y_min * original_image_height\n x_max = x_max * original_image_width\n y_max = y_max * original_image_height\n return np.concatenate([x_min[:, None], y_min[:, None],\n x_max[:, None], y_max[:, None]], axis=1)\n\n def _draw_normalized_box(self, box_data, original_image_array, tf_nms=False):\n image_array = np.squeeze(original_image_array)\n image_array = image_array.astype('uint8')\n image_size = image_array.shape[0:2]\n image_size = (image_size[1], image_size[0])\n box_classes = box_data[:, 4:]\n box_coordinates = box_data[:, 0:4]\n original_coordinates = self._denormalize_box(box_coordinates,\n image_size)\n box_data = np.concatenate([original_coordinates, box_classes], axis=-1)\n if tf_nms:\n #print(box_data.shape)\n selected_indices = self.apply_non_max_suppression_tf(box_data[:, 0:4],\n np.max(box_data[:, 4:],axis=-1))\n box_data = box_data[selected_indices]\n else:\n box_data = self.apply_non_max_suppression_fast(box_data)\n\n if len(box_data) == 0:\n return\n figure, axis = plt.subplots(1)\n axis.imshow(image_array)\n x_min = box_data[:, 0]\n y_min = box_data[:, 1]\n x_max = box_data[:, 2]\n y_max = box_data[:, 3]\n width = x_max - x_min\n height = y_max - y_min\n classes = box_data[:, 4:]\n num_boxes = len(box_data)\n for box_arg in range(num_boxes):\n x_min_box = int(x_min[box_arg])\n y_min_box = int(y_min[box_arg])\n box_width = int(width[box_arg])\n box_height = int(height[box_arg])\n box_class = classes[box_arg]\n label_arg = np.argmax(box_class)\n score = box_class[label_arg]\n class_name = self.arg_to_class[label_arg]\n color = self.colors[label_arg]\n display_text = '{:0.2f}, {}'.format(score, 
class_name)\n cv2.rectangle(original_image_array, (x_min_box, y_min_box),\n (x_min_box + box_width, y_min_box + box_height),\n color, 2)\n cv2.putText(original_image_array, display_text,\n (x_min_box, y_min_box - 30), self.font,\n .7, color, 1, cv2.LINE_AA)\n\n def draw_boxes_in_video(self, predictions, original_image_array):\n decoded_predictions = self._decode_boxes(predictions)\n selected_boxes = self._filter_boxes(decoded_predictions)\n if len(selected_boxes) == 0:\n return\n #if len(decoded_predictions) == 0:\n #return\n self._draw_normalized_box(selected_boxes, original_image_array)\n\n def start_video(self, model):\n camera = cv2.VideoCapture(0)\n while True:\n ret, frame = camera.read()\n #print(frame.shape)\n image_array = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n image_array = image_array.astype('float32')\n image_array = cv2.resize(image_array, (300, 300))\n image_array = np.expand_dims(image_array, axis=0)\n image_array = preprocess_images(image_array)\n predictions = model.predict(image_array)\n predictions = np.squeeze(predictions)\n self.draw_boxes_in_video(predictions, frame)\n cv2.imshow('webcam', frame)\n if cv2.waitKey(1)&0xFF == ord('q'):\n break\n\n camera.release()\n cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n from models import SSD300\n from utils.prior_box_creator import PriorBoxCreator\n num_classes = 21\n dataset_name = 'VOC2007'\n model = SSD300(num_classes=num_classes)\n box_creator = PriorBoxCreator(model)\n prior_boxes = box_creator.create_boxes()\n weights_filename = '../trained_models/weights_SSD300.hdf5'\n model.load_weights(weights_filename)\n video = VideoTest(prior_boxes, dataset_name=dataset_name)\n video.start_video(model)\n", "sub_path": "src/video_test.py", "file_name": "video_test.py", "file_ext": "py", "file_size_in_byte": 15300, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "utils.utils.get_class_names", "line_number": 19, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm.hsv", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 21, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 21, "usage_type": "name"}, {"api_name": "numpy.linspace", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 22, "usage_type": "call"}, {"api_name": "cv2.FONT_HERSHEY_SIMPLEX", "line_number": 25, "usage_type": "attribute"}, {"api_name": "tensorflow.placeholder", "line_number": 27, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 28, "usage_type": "call"}, {"api_name": "tensorflow.image.non_max_suppression", "line_number": 31, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 31, "usage_type": "attribute"}, {"api_name": "tensorflow.Session", "line_number": 34, "usage_type": "call"}, {"api_name": "tensorflow.ConfigProto", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 39, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 46, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 51, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 54, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 58, "usage_type": 
"call"}, {"api_name": "tensorflow.placeholder", "line_number": 69, "usage_type": "call"}, {"api_name": "tensorflow.placeholder", "line_number": 70, "usage_type": "call"}, {"api_name": "tensorflow.image.non_max_suppression", "line_number": 72, "usage_type": "call"}, {"api_name": "tensorflow.image", "line_number": 72, "usage_type": "attribute"}, {"api_name": "numpy.maximum", "line_number": 93, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 94, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 95, "usage_type": "call"}, {"api_name": "numpy.minimum", "line_number": 96, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 99, "usage_type": "call"}, {"api_name": "numpy.maximum", "line_number": 100, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 134, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 145, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 159, "usage_type": "call"}, {"api_name": "numpy.argmax", "line_number": 160, "usage_type": "call"}, {"api_name": "numpy.logical_and", "line_number": 166, "usage_type": "call"}, {"api_name": "numpy.delete", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 167, "usage_type": "call"}, {"api_name": "numpy.where", "line_number": 168, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 199, "usage_type": "call"}, {"api_name": "numpy.exp", "line_number": 201, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 209, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 213, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 215, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 229, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 233, "usage_type": "call"}, {"api_name": "numpy.concatenate", "line_number": 241, "usage_type": "call"}, {"api_name": "numpy.max", "line_number": 245, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 252, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 252, "usage_type": "name"}, {"api_name": "numpy.argmax", "line_number": 268, "usage_type": "call"}, {"api_name": "cv2.rectangle", "line_number": 273, "usage_type": "call"}, {"api_name": "cv2.putText", "line_number": 276, "usage_type": "call"}, {"api_name": "cv2.LINE_AA", "line_number": 278, "usage_type": "attribute"}, {"api_name": "cv2.VideoCapture", "line_number": 290, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 294, "usage_type": "call"}, {"api_name": "cv2.COLOR_BGR2RGB", "line_number": 294, "usage_type": "attribute"}, {"api_name": "cv2.resize", "line_number": 296, "usage_type": "call"}, {"api_name": "numpy.expand_dims", "line_number": 297, "usage_type": "call"}, {"api_name": "utils.utils.preprocess_images", "line_number": 298, "usage_type": "call"}, {"api_name": "numpy.squeeze", "line_number": 300, "usage_type": "call"}, {"api_name": "cv2.imshow", "line_number": 302, "usage_type": "call"}, {"api_name": "cv2.waitKey", "line_number": 303, "usage_type": "call"}, {"api_name": "cv2.destroyAllWindows", "line_number": 307, "usage_type": "call"}, {"api_name": "models.SSD300", "line_number": 314, "usage_type": "call"}, {"api_name": "utils.prior_box_creator.PriorBoxCreator", "line_number": 315, "usage_type": "call"}]} +{"seq_id": "191779887", "text": "\"\"\"\n# DRTB Case 
import\n\nThis management imports cases into HQ from a given excel file.\n\nThe script supports importing three different excel file formats (mumbai, mehsana2016, and mehsana2017), although\nonly mumbai is complete!!\n\n## Usage\n\nNOTE: You should be sure to save all excel files processed by this management command and the csv files it\nproduces. These will be essential for debugging the import should any issues arise later.\n\nExample:\n```\n$ ./manage.py import_drtb_cases enikshay drtb_cases.xlsx mumbai\n```\n(This is a dry run because no `--commit` flag is passed)\n\nEach run of the script is assigned an id, based on the current time.\nEvery run (dry or otherwise) will output two csv log files.\n\n\"drtb-import-.csv\" lists the case ids that were created, or the exception that was raised for each row\nof the import file.\n\n\"bad-rows-.csv\" lists each row that was not imported successfully, and the error message for that row.\nThis document is useful for sending back to EY for cleaning. The document also includes the original row, so\na cleaner can:\n- open this document\n- fix each error *in the same document* according to the error message\n- delete the first two columns (which list row number and error message)\n- send the document back to dimagi for re-import\nThen you can simply run the script with the modified document.\n\nI've also created a companion management command to help debug issues that may occur. I'm imagining the following\ntypes of requests coming in from the field:\n\n1. \"John Doe was in the spreadsheet, but I can't find him in the app\"\nIn this case, you'll want to be able to match a row from the import to a commcare case.\nYou can run\n```\n$ ./manage.py drtb_import_history get_outcome \n```\nThis will parse the \"drtb-import-.csv\" created by `import_drtb_cases`, and print either a list of case\nids created, or the error message that was raised in processing that row.\n\n2. \"This case in the app is in an inconsistent state, I think something might have gone wrong with the import\"\nIn this case, you will want to be able to match a case id to the spreadsheet row that it was generated from.\nYou can run:\n```\n$ ./manage.py drtb_import_history get_row \n```\nThis will output a row number.\n\n\n## Design\n\nThe spec is located [here](https://docs.google.com/spreadsheets/d/1Pz-cYNvo5BkF-Sta1ol4ZzfBYIQ4kGlZ3FdJgBLe5WE/edit#gid=1273583155)\nand defines how each row in the excel sheet should be mapped to commcare cases. Each row corresponds to multiple\ncases (sometimes dozens).\n\nYou'll probably notice that a fair number of the `clean_()` functions are pretty lame, and just check\nif the given value is in some list. There has been a bit of churn on the requirements for this script, so at an\nearlier time these functions did more sophisticated conversion of messy values in the xlsx to values that matched\nour app. However it was eventually decided to have EY clean all the excel sheets beforehand, which is why those\nfunctions don't do much now. I probably wouldn't have used this architecture if I was writing this from scratch.\n\nThe main components of the script are as follows:\n\n\n## `ColumnMapping`\nThis class allows accessing the values in an excel sheet row by a normalized column name. e.g.\n```\nColumnMapping.get_value(\"age\", row)\n```\nThis will return the age value from the row. The normalized column names are useful because the column index will\n
This also makes it easy to change the index->column name mapping should anyone happen to\nadd columns to the sheet without telling you :)\n\n### _MAP\nEach `ColumnMapping` references a dictionary (e.g. MUMBAI_MAP) that maps the normalized column names to column\nindexes)\n\n\n## `MumbaiConstants`/`MehsanaConstants`\nThese classes hold constants specific to their respective locations which are used to populate some case properties\n\n\n## `get_case_structures_from_row()`\nThis is where the magic happens. The row is converted to CaseStructure objects, which will alter be submited to HQ\nwith the CaseFactory if this isn't a dry run. Various helper functions extract case property dicts from the row\nfor each case, then convert these to CaseStructure objects.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import unicode_literals\nimport csv\nimport decimal\nimport logging\nimport datetime\nimport traceback\nimport uuid\n\nimport re\nfrom collections import namedtuple\n\nfrom dateutil.parser import parse\nfrom django.core.management import (\n BaseCommand,\n)\nfrom django.db import models\n\nfrom casexml.apps.case.const import CASE_INDEX_EXTENSION\nfrom casexml.apps.case.mock import CaseStructure, CaseIndex, CaseFactory\nfrom corehq.apps.locations.dbaccessors import get_users_by_location_id\nfrom corehq.apps.locations.models import SQLLocation\nfrom corehq.apps.users.util import update_device_meta\nfrom corehq.util.workbook_reading import open_any_workbook\nfrom custom.enikshay.case_utils import CASE_TYPE_PERSON, CASE_TYPE_OCCURRENCE, CASE_TYPE_EPISODE, CASE_TYPE_TEST, \\\n CASE_TYPE_DRUG_RESISTANCE, CASE_TYPE_SECONDARY_OWNER\nfrom custom.enikshay.two_b_datamigration.models import MigratedDRTBCaseCounter\nfrom custom.enikshay.user_setup import compress_nikshay_id, join_chunked\nfrom memoized import memoized\nimport six\nfrom six.moves import filter\nfrom six.moves import range\n\nlogger = logging.getLogger('two_b_datamigration')\n\n\nDETECTED = \"tb_detected\"\nNOT_DETECTED = \"tb_not_detected\"\nNO_RESULT = \"no_result\"\nDRUG_R = 'r'\nDRUG_H_INHA = 'h_inha'\nDRUG_H_KATG = 'h_katg'\nDRUG_CLASS_FQ = 'fq'\nDRUG_CLASS_SLID = 'slid'\nDRUG_CLASS_FIRST = 'first_line'\nRESISTANT = 'resistant'\nSENSITIVITY = 'sensitivity'\nDRUG_ID = 'drug_id'\nDRUG_CLASS = 'drug_class'\n\n\nclass ValidationFailure(Exception):\n pass\n\n\nclass FieldValidationFailure(ValidationFailure):\n def __init__(self, value, column_name, *args, **kwargs):\n self.value = value\n self.column_name = column_name\n msg = \"Unexpected value in {} column: {}\".format(column_name, value)\n super(FieldValidationFailure, self).__init__(msg, *args, **kwargs)\n\n\n# Map format is: MDR selection criteria value -> (rft_drtb_diagnosis value, rft_drtb_diagnosis_ext_dst value)\n# TODO: (WAITING) Fill in these Nones\nSELECTION_CRITERIA_MAP = {\n \"MDR sus -Pre.Treat At diagnosis(Smear+ve/-ve)\": (\"mdr_at_diagnosis\", None),\n \"MDR sus -Pre.Treat At diagnosis(Smear+ve/-ve).\": (\"mdr_at_diagnosis\", None),\n \"MDR sus-Private Referral\": (\"private_referral\", None),\n \"MDR sus -NSP/NSN At diagnosis\": (None, None),\n \"MDR sus -Follow up Sm+ve\": (\"follow_up_sm_ve_ip\", None),\n \"MDR sus -Contact of MDR/RR TB\": (\"contact_of_mdr_rr\", None),\n \"MDR sus -New At diagnosis(Smear+ve/-ve)\": (\"mdr_at_diagnosis\", None),\n \"MDR sus -Discordance Resolution\": (\"discordance_resolution\", None),\n \"EP Presumptive\": (None, None),\n \"PLHIV Presumptive\": 
\nSELECTION_CRITERIA_MAP = {\n \"MDR sus -Pre.Treat At diagnosis(Smear+ve/-ve)\": (\"mdr_at_diagnosis\", None),\n \"MDR sus -Pre.Treat At diagnosis(Smear+ve/-ve).\": (\"mdr_at_diagnosis\", None),\n \"MDR sus-Private Referral\": (\"private_referral\", None),\n \"MDR sus -NSP/NSN At diagnosis\": (None, None),\n \"MDR sus -Follow up Sm+ve\": (\"follow_up_sm_ve_ip\", None),\n \"MDR sus -Contact of MDR/RR TB\": (\"contact_of_mdr_rr\", None),\n \"MDR sus -New At diagnosis(Smear+ve/-ve)\": (\"mdr_at_diagnosis\", None),\n \"MDR sus -Discordance Resolution\": (\"discordance_resolution\", None),\n \"EP Presumptive\": (None, None),\n \"PLHIV Presumptive\": (None, None),\n \"Pre XDR-MDR/RR TB at Diagnosis\": (\"extended_dst\", \"mdr_rr_diagnosis\"),\n \"Pre XDR >4 months culture positive\": (\"extended_dst\", None),\n \"Pre XDR -Failure of MDR/RR-TB regimen\": (\"extended_dst\", \"mdr_rr_failure\"),\n \"Pre XDR -Recurrent case of second line treatment\": (\"extended_dst\", \"recurrent_second_line_treatment\"),\n \"Pre XDR -Culture reversion\": (\"extended_dst\", \"culture_reversion\"),\n \"Paediatric Presumptive\": (None, None),\n \"HIV -EP TB\": (None, None),\n \"HIV TB (Smear+ve)\": (None, None),\n \"HIV TB (Smear+ve at diagnosis)\": (None, None),\n \"Other\": (None, None),\n}\n\n\n# A map of column identifier to column index in the Mehsana 2017 excel sheet.\nMEHSANA_2017_MAP = {\n \"testing_facility\": 1,\n \"person_name\": 3,\n \"mdr_selection_criteria\": 4,\n \"district_name\": 5,\n \"report_sending_date\": 6,\n \"nikshay_id\": 7,\n \"s\": 10,\n \"h_inha\": 11,\n \"h_katg\": 12,\n \"r\": 13,\n \"e\": 14,\n \"z\": 15,\n \"km\": 16,\n \"cm\": 17,\n \"am\": 18,\n \"lfx\": 19,\n \"mfx_05\": 20,\n \"mfx_20\": 21,\n \"pas\": 22,\n \"lzd\": 23,\n \"cfz\": 24,\n \"eto\": 25,\n \"clr\": 26,\n \"azi\": 27,\n \"treatment_status\": 35,\n \"drtb_number\": 36,\n \"treatment_initiation_date\": 37,\n \"reason_for_not_initiation_on_treatment\": 41,\n \"type_of_treatment_initiated\": 44,\n \"date_put_on_mdr_treatment\": 45,\n \"month_3_follow_up_send_date\": 47,\n \"month_3_follow_up_result_date\": 48,\n \"month_3_follow_up_result\": 50,\n \"month_4_follow_up_send_date\": 51,\n \"month_4_follow_up_result_date\": 52,\n \"month_4_follow_up_result\": 54,\n \"month_5_follow_up_send_date\": 55,\n \"month_5_follow_up_result_date\": 56,\n \"month_5_follow_up_result\": 58,\n \"month_6_follow_up_send_date\": 59,\n \"month_6_follow_up_result_date\": 60,\n \"month_6_follow_up_result\": 62,\n \"month_9_follow_up_send_date\": 63,\n \"month_9_follow_up_result_date\": 64,\n \"month_9_follow_up_result\": 66,\n \"month_12_follow_up_send_date\": 67,\n \"month_12_follow_up_result_date\": 68,\n \"month_12_follow_up_result\": 70,\n \"month_end_follow_up_send_date\": 71,\n \"month_end_follow_up_result_date\": 72,\n \"month_end_follow_up_result\": 74,\n \"treatment_outcome\": 75,\n \"date_of_treatment_outcome\": 76,\n}\n\n\n# A map of column identifier to column index in the Mehsana 2016 excel sheet.\nMEHSANA_2016_MAP = {\n \"person_name\": 3,\n \"district_name\": 5,\n \"report_sending_date\": 7,\n \"treatment_initiation_date\": 12,\n \"registration_date\": 13,\n \"date_put_on_mdr_treatment\": 19,\n \"type_of_treatment_initiated\": 47,\n \"mdr_selection_criteria\": 4,\n \"testing_facility\": 1,\n \"dst_result\": 6,\n \"month_3_follow_up_send_date\": 50,\n \"month_3_follow_up_result_date\": 51,\n \"month_3_follow_up_result\": 52,\n \"month_4_follow_up_send_date\": 53,\n \"month_4_follow_up_result_date\": 54,\n \"month_4_follow_up_result\": 55,\n \"month_6_follow_up_send_date\": 56,\n \"month_6_follow_up_result_date\": 57,\n \"month_6_follow_up_result\": 58,\n \"s\": 28,\n \"h_inha\": 29,\n \"h_katg\": 30,\n \"r\": 31,\n \"e\": 32,\n \"z\": 33,\n \"km\": 34,\n \"cm\": 35,\n \"am\": 36,\n \"lfx\": 37,\n \"mfx_05\": 38,\n \"mfx_20\": 39,\n \"pas\": 40,\n \"lzd\": 41,\n \"cfz\": 42,\n \"eto\": 43,\n \"clr\": 44,\n \"azi\": 45,\n}\n\n# A map of column identifier to column index in the Mumbai excel sheet.\nMUMBAI_MAP = {\n \"drtb_number\": 3,\n \"nikshay_id\": 5,\n \"registration_date\": 7,\n \"person_name\": 8,\n \"sex\": 9,\n \"age_entered\": 10,\n \"address\": 11,\n 
\"phone_number\": 12,\n \"occupation\": 13,\n \"marital_status\": 14,\n \"social_scheme\": 15,\n \"key_populations\": 16,\n \"initial_home_visit_by\": 17,\n \"initial_home_visit_date\": 18,\n \"aadhaar_number\": 19,\n \"drtb_center_code\": 21,\n \"district_name\": 22,\n \"phi_name\": 25,\n \"reason_for_testing\": 27,\n \"site_of_disease\": 28,\n \"type_of_patient\": 29,\n \"weight\": 30,\n \"weight_band\": 31,\n \"height\": 32,\n \"hiv_status\": 33,\n \"hiv_test_date\": 34,\n \"hiv_program_id\": 35,\n \"cpt_initiation_date\": 36,\n \"art_initiation_date\": 37,\n \"diabetes\": 38,\n \"cbnaat_lab\": 39, # This is similar to testing_facility, but slightly different\n \"cbnaat_lab_number\": 40,\n \"cbnaat_sample_date\": 41,\n \"cbnaat_result\": 42,\n \"cbnaat_result_date\": 43,\n \"lpa_lab\": 44,\n \"lpa_lab_number\": 45,\n \"lpa_sample_date\": 46,\n \"lpa_rif_result\": 47,\n \"lpa_inh_result\": 48,\n \"lpa_result_date\": 49,\n \"sl_lpa_lab\": 50,\n \"sl_lpa_lab_number\": 51,\n \"sl_lpa_sample_date\": 52,\n \"sl_lpa_result\": 53,\n \"sl_lpa_result_date\": 54,\n \"culture_lab\": 55,\n \"culture_lab_number\": 56,\n \"culture_sample_date\": 57,\n \"culture_type\": 58,\n \"culture_result\": 59,\n \"culture_result_date\": 60,\n \"dst_sample_date\": 61,\n \"dst_type\": 62,\n \"lfx\": 63, # Levo\n \"eto\": 64, # Ethionamide\n \"cs\": 65, # Cycloserine\n \"e\": 66, # Ethambutol\n \"z\": 67, # PZA\n \"km\": 68, # Kana\n \"cm\": 69, # Capr\n \"mfx_05\": 70, # Moxi\n \"mfx_20\": 71, # High dose Moxi\n \"cfz\": 72, # Clofa\n \"lzd\": 73, # Line\n \"h_inha\": 74, # High dose INH\n \"h_katg\": 75,\n \"pas\": 76, # Na-Pas\n \"ofx\": 77, # Oflox\n \"s\": 78,\n \"clr\": 79,\n \"r\": 80, # Rif\n \"amx_clv\": 81,\n \"am\": 82,\n \"dst_result_date\": 83,\n \"treatment_initiation_date\": 89,\n \"treatment_regimen\": 93,\n \"ip_to_cp_date\": 95,\n \"treatment_outcome\": 200,\n \"date_of_treatment_outcome\": 201,\n}\n\nDRUG_MAP = {\n \"r\": {\n \"sort_order\": \"01\",\n \"drug_name\": \"R\",\n \"drug_class\": \"first_line\",\n },\n \"s\": {\n \"sort_order\": \"04\",\n \"drug_name\": \"S\",\n \"drug_class\": \"first_line\",\n },\n \"h_inha\": {\n \"sort_order\": \"02\",\n \"drug_name\": \"H (inhA)\",\n \"drug_class\": \"first_line\",\n },\n \"h_katg\": {\n \"sort_order\": \"03\",\n \"drug_name\": \"H (katG)\",\n \"drug_class\": \"first_line\",\n },\n \"e\": {\n \"sort_order\": \"05\",\n \"drug_name\": \"E\",\n \"drug_class\": \"first_line\",\n },\n \"z\": {\n \"sort_order\": \"06\",\n \"drug_name\": \"Z\",\n \"drug_class\": \"first_line\",\n },\n \"slid_class\": {\n \"sort_order\": \"07\",\n \"drug_name\": \"SLID Drugs\",\n \"drug_class\": \"slid\",\n },\n \"km\": {\n \"sort_order\": \"08\",\n \"drug_name\": \"Km\",\n \"drug_class\": \"slid\",\n },\n \"cm\": {\n \"sort_order\": \"09\",\n \"drug_name\": \"Cm\",\n \"drug_class\": \"slid\",\n },\n \"am\": {\n \"sort_order\": \"10\",\n \"drug_name\": \"Am\",\n \"drug_class\": \"slid\",\n },\n \"fq_class\": {\n \"sort_order\": \"11\",\n \"drug_name\": \"FQ Drugs\",\n \"drug_class\": \"fq\",\n },\n \"lfx\": {\n \"sort_order\": \"12\",\n \"drug_name\": \"Lfx\",\n \"drug_class\": \"fq\",\n },\n \"mfx_05\": {\n \"sort_order\": \"14\",\n \"drug_name\": \"Mfx (0.5)\",\n \"drug_class\": \"fq\",\n },\n \"mfx_20\": {\n \"sort_order\": \"15\",\n \"drug_name\": \"Mfx (2.0)\",\n \"drug_class\": \"fq\",\n },\n \"eto\": {\n \"sort_order\": \"16\",\n \"drug_name\": \"Eto\",\n \"drug_class\": \"other\",\n },\n \"pas\": {\n \"sort_order\": \"17\",\n \"drug_name\": \"PAS\",\n 
\"drug_class\": \"other\",\n },\n \"lzd\": {\n \"sort_order\": \"18\",\n \"drug_name\": \"Lzd\",\n \"drug_class\": \"other\",\n },\n \"cfz\": {\n \"sort_order\": \"19\",\n \"drug_name\": \"Cfz\",\n \"drug_class\": \"other\",\n },\n \"clr\": {\n \"sort_order\": \"20\",\n \"drug_name\": \"Clr\",\n \"drug_class\": \"other\",\n },\n \"azi\": {\n \"sort_order\": \"21\",\n \"drug_name\": \"Azi\",\n \"drug_class\": \"other\",\n },\n \"bdq\": {\n \"sort_order\": \"22\",\n \"drug_name\": \"Bdq\",\n \"drug_class\": \"other\",\n },\n \"dlm\": {\n \"sort_order\": \"23\",\n \"drug_name\": \"Dlm\",\n \"drug_class\": \"other\",\n },\n \"cs\": {\n \"sort_order\": \"24\",\n \"drug_name\": \"CS\",\n \"drug_class\": \"other\",\n },\n \"ofx\": {\n \"sort_order\": \"25\",\n \"drug_name\": \"OFX\",\n \"drug_class\": \"fq\",\n },\n \"amx_clv\": {\n \"sort_order\": \"26\",\n \"drug_name\": \"AMX/CLV\",\n \"drug_class\": \"other\",\n },\n}\n\nALL_MAPPING_DICTS = (MEHSANA_2016_MAP, MEHSANA_2017_MAP, MUMBAI_MAP)\n\nPREV_OCCURRENCE_PROPERTIES = [\n \"disease_classification\",\n \"site_choice\",\n \"site_detail\",\n]\n\nPREV_EPISODE_PROPERTIES = [\n \"adherence_total_doses_taken\",\n \"adherence_type_choice\",\n \"adherence_type_ict_choice\",\n \"adherence_type_ict_other_detail\",\n \"adherence_type_other_detail\",\n \"adr_history\",\n \"date_of_diagnosis\",\n \"dosage_display\",\n \"dosage_history\",\n \"drtb_meetings_history\",\n \"drug_resistance_display\",\n \"episode_type\",\n \"treatment_initiation_date\",\n \"treatment_outcome\",\n \"treatment_outcome_date\",\n \"treatment_outcome_loss_to_follow_up_reason\",\n \"treatment_regimen\",\n \"treatment_status\",\n \"treatment_status_other\",\n \"weight_band\",\n \"weight_history\",\n]\n\nPREV_PERSON_PROPERTIES = [\n \"phi_name\",\n]\n\n\nclass ColumnMapping(object):\n mapping_dict = None\n required_fields = []\n\n @classmethod\n def get_value(cls, normalized_column_name, row):\n try:\n column_index = cls.mapping_dict[normalized_column_name]\n return row[column_index].value\n except KeyError:\n return cls.handle_mapping_miss(normalized_column_name)\n except IndexError:\n return None\n\n @classmethod\n def handle_mapping_miss(cls, normalized_column_name):\n exists_in_some_mapping = False\n for mapping in ALL_MAPPING_DICTS:\n if normalized_column_name in mapping:\n exists_in_some_mapping = True\n break\n if exists_in_some_mapping:\n return None\n else:\n raise KeyError(\n \"Invalid normalized_column_name '{}' passed to ColumnMapping.get_value()\".format(\n normalized_column_name\n )\n )\n\n @classmethod\n def check_for_required_fields(cls, row):\n \"\"\"Raise an exception if row is missing a required field\"\"\"\n for key in cls.required_fields:\n val = cls.get_value(key, row)\n if not val:\n raise ValidationFailure(\"{} is required\".format(key))\n\n\nclass Mehsana2017ColumnMapping(ColumnMapping):\n mapping_dict = MEHSANA_2017_MAP\n required_fields = (\n \"person_name\",\n \"district_name\",\n )\n\n\nclass Mehsana2016ColumnMapping(ColumnMapping):\n mapping_dict = MEHSANA_2016_MAP\n required_fields = (\n \"person_name\",\n \"district_name\",\n )\n\n\nclass MumbaiColumnMapping(ColumnMapping):\n mapping_dict = MUMBAI_MAP\n required_fields = (\n \"registration_date\",\n \"person_name\",\n \"district_name\",\n \"phi_name\",\n # The phi must also be valid, but this is checked in the match_phi function.\n )\n follow_up_culture_index_start = 96\n follow_up_culture_month_start = 3\n\n @classmethod\n def get_follow_up_culture_result(cls, month, row):\n index = 
cls._get_follow_up_start_index(month)\n try:\n return row[index].value\n except IndexError:\n return None\n\n @classmethod\n def get_follow_up_culture_lab(cls, month, row):\n index = cls._get_follow_up_start_index(month) + 1\n try:\n return row[index].value\n except IndexError:\n return None\n\n @classmethod\n def get_follow_up_culture_date(cls, month, row):\n index = cls._get_follow_up_start_index(month) + 2\n try:\n return row[index].value\n except IndexError:\n return None\n\n @classmethod\n def _get_follow_up_start_index(cls, month):\n if month == 36:\n # For some reason the sheet jumps from 33 to 36, so just special casing it.\n # This will just treat month=36 as the next columns after month 33\n month = 34\n\n offset = (month - 3) * 3\n index = cls.follow_up_culture_index_start + offset\n return index\n\n\nclass MumbaiConstants(object):\n \"\"\"A collection of Mumbai specific constants\"\"\"\n # TODO: (WAITING) Fill in these values\n # This is waiting on upload of the locations. It looks like for mumbai these might not be constants\n drtb_center_name = None\n drtb_center_id = None\n\n\nclass MehsanaConstants(object):\n \"\"\"A collection of Mehsana specific constants\"\"\"\n # TODO: (WAITING) Fill in these values\n # This is waiting on upload of the locations\n drtb_center_name = None\n drtb_center_id = None\n\n\ndef get_case_structures_from_row(commit, domain, migration_id, column_mapping, city_constants, row):\n \"\"\"\n Return a list of CaseStructure objects corresponding to the information in the given row.\n \"\"\"\n person_case_properties = get_person_case_properties(domain, column_mapping, row)\n occurrence_case_properties = get_occurrence_case_properties(column_mapping, row)\n episode_case_properties = get_episode_case_properties(domain, column_mapping, city_constants, row)\n test_case_properties = get_test_case_properties(\n domain, column_mapping, row, episode_case_properties['treatment_initiation_date'])\n drug_resistance_case_properties = get_drug_resistance_case_properties(\n column_mapping, row, test_case_properties)\n secondary_owner_case_properties = get_secondary_owner_case_properties(\n domain, city_constants, column_mapping, row, person_case_properties['dto_id'])\n\n # We do this as a separate step because we don't want to generate ids if there is going to be an exception\n # raised while generating the other properties.\n update_cases_with_readable_ids(\n commit, domain, person_case_properties, occurrence_case_properties, episode_case_properties,\n secondary_owner_case_properties\n )\n\n # Close the occurrence if we have a treatment outcome recorded\n close_occurrence = False\n close_person = False\n if (\n \"treatment_outcome\" in episode_case_properties\n and episode_case_properties[\"treatment_outcome\"]\n ):\n close_occurrence = True\n person_case_properties.update(\n get_prev_person_case_properties(PREV_OCCURRENCE_PROPERTIES, occurrence_case_properties))\n person_case_properties.update(\n get_prev_person_case_properties(PREV_EPISODE_PROPERTIES, episode_case_properties))\n person_case_properties.update(\n get_prev_person_case_properties(PREV_PERSON_PROPERTIES, person_case_properties))\n person_case_properties['prev_drtb_center_name'] = \\\n secondary_owner_case_properties[0]['secondary_owner_name']\n\n if episode_case_properties[\"treatment_outcome\"] == \"died\":\n close_person = True\n else:\n person_case_properties['owner_id'] = '_archive_'\n person_case_properties['phi_name'] = ''\n person_case_properties['tu_name'] = ''\n person_case_properties['tu_id'] = 
''\n person_case_properties['dto_name'] = ''\n person_case_properties['dto_id'] = ''\n\n person_case_properties['current_episode_type'] = ''\n person_case_properties['current_disease_classification'] = ''\n person_case_properties['current_site_choice'] = ''\n person_case_properties['current_site_detail'] = ''\n\n # calculate episode_case_id so we can also set it on all tests\n episode_case_id = uuid.uuid4().hex\n\n # update the drtb type based on the drug resistance info\n drug_resistance_info = [\n {\n DRUG_ID: d[DRUG_ID],\n SENSITIVITY: d[SENSITIVITY],\n DRUG_CLASS: DRUG_MAP[d[DRUG_ID]][DRUG_CLASS],\n }\n for d in drug_resistance_case_properties\n ]\n episode_case_properties['drtb_type'] = get_drtb_type(drug_resistance_info)\n\n for test in test_case_properties:\n test['episode_case_id'] = episode_case_id\n person_case_structure = get_case_structure(CASE_TYPE_PERSON, person_case_properties, migration_id,\n close=close_person)\n occurrence_case_structure = get_case_structure(\n CASE_TYPE_OCCURRENCE, occurrence_case_properties, migration_id, host=person_case_structure,\n close=close_occurrence)\n episode_case_structure = get_case_structure(\n CASE_TYPE_EPISODE, episode_case_properties, migration_id, host=occurrence_case_structure,\n case_id=episode_case_id, close=close_occurrence)\n drug_resistance_case_structures = [\n get_case_structure(CASE_TYPE_DRUG_RESISTANCE, props, migration_id, host=occurrence_case_structure,\n close=close_occurrence)\n for props in drug_resistance_case_properties\n ]\n test_case_structures = [\n get_case_structure(CASE_TYPE_TEST, props, migration_id, host=occurrence_case_structure,\n close=close_occurrence)\n for props in test_case_properties\n ]\n secondary_owner_case_structures = [\n get_case_structure(CASE_TYPE_SECONDARY_OWNER, props, migration_id, host=occurrence_case_structure,\n close=close_occurrence)\n for props in secondary_owner_case_properties\n ]\n\n return [\n person_case_structure,\n occurrence_case_structure,\n episode_case_structure,\n ] + secondary_owner_case_structures + drug_resistance_case_structures + test_case_structures\n\n\ndef update_cases_with_readable_ids(commit, domain, person_case_properties, occurrence_case_properties,\n episode_case_properties, secondary_owner_case_properties):\n phi_id = person_case_properties['owner_id']\n person_id_flat = _PersonIdGenerator.generate_person_id_flat(domain, phi_id, commit)\n person_id = join_chunked(person_id_flat, 3)\n occurrence_id = person_id + \"-O1\"\n episode_id = person_id + \"-E1\"\n\n person_case_properties['person_id'] = person_id\n person_case_properties['person_id_flat'] = person_id_flat\n occurrence_case_properties[\"occurrence_id\"] = occurrence_id\n occurrence_case_properties[\"name\"] = occurrence_id\n episode_case_properties['episode_id'] = episode_id\n episode_case_properties['name'] = episode_id\n for secondary_owner in secondary_owner_case_properties:\n secondary_owner['name'] = occurrence_id + secondary_owner['secondary_owner_type']\n\n\ndef get_case_structure(case_type, properties, migration_identifier, host=None, close=False, case_id=None):\n \"\"\"\n Converts a properties dictionary to a CaseStructure object\n \"\"\"\n if not case_id:\n case_id = uuid.uuid4().hex\n owner_id = properties.pop(\"owner_id\")\n props = {k: v for k, v in six.iteritems(properties) if v is not None}\n props['created_by_migration'] = migration_identifier\n props['migration_data_source'] = \"excel_document\"\n props['migration_type'] = \"pmdt_excel\"\n kwargs = {\n \"case_id\": case_id,\n 
\"walk_related\": False,\n \"attrs\": {\n \"case_type\": case_type,\n \"create\": True,\n \"owner_id\": owner_id,\n \"update\": props,\n \"close\": close,\n },\n }\n if host:\n kwargs['indices'] = [CaseIndex(\n host,\n identifier='host',\n relationship=CASE_INDEX_EXTENSION,\n related_type=host.attrs['case_type'],\n )]\n return CaseStructure(**kwargs)\n\n\ndef get_person_case_properties(domain, column_mapping, row):\n person_name = column_mapping.get_value(\"person_name\", row)\n xlsx_district_name = column_mapping.get_value(\"district_name\", row)\n district_name, district_id = match_district(domain, xlsx_district_name)\n phi_name, phi_id = match_phi(domain, column_mapping.get_value(\"phi_name\", row))\n tu_name, tu_id = get_tu(domain, phi_id)\n age = clean_age_entered(column_mapping.get_value(\"age_entered\", row))\n\n properties = {\n \"name\": person_name,\n \"dto_name\": district_name,\n \"dto_id\": district_id,\n \"owner_id\": phi_id,\n \"current_episode_type\": \"confirmed_drtb\",\n \"sex\": clean_sex(column_mapping.get_value(\"sex\", row)),\n \"age_entered\": age,\n \"age\": age,\n \"dob\": calculate_dob(column_mapping.get_value(\"age_entered\", row)),\n \"current_address\": column_mapping.get_value(\"address\", row),\n \"aadhaar_number\": column_mapping.get_value(\"aadhaar_number\", row),\n \"phi_name\": phi_name,\n \"tu_name\": tu_name,\n \"tu_id\": tu_id,\n \"hiv_status\": clean_hiv_status(column_mapping.get_value(\"hiv_status\", row)),\n \"hiv_test_date\": clean_date(column_mapping.get_value(\"hiv_test_date\", row)),\n \"hiv_program_id\": column_mapping.get_value(\"hiv_program_id\", row),\n \"cpt_initiation_date\": clean_date(column_mapping.get_value(\"cpt_initiation_date\", row)),\n \"art_initiation_date\": clean_date(column_mapping.get_value(\"art_initiation_date\", row)),\n \"diabetes_status\": clean_diabetes_status(column_mapping.get_value(\"diabetes\", row)),\n \"language_code\": \"hin\",\n \"case_version\": \"20\",\n \"enrolled_in_private\": \"false\",\n \"dataset\": \"real\",\n }\n\n properties.update(get_disease_site_properties_for_person(column_mapping, row))\n\n if properties[\"cpt_initiation_date\"]:\n properties[\"cpt_initiated\"] = \"yes\"\n if properties[\"art_initiation_date\"]:\n properties[\"art_initiated\"] = \"yes\"\n\n phone_number = column_mapping.get_value(\"phone_number\", row)\n if phone_number:\n clean_number = clean_phone_number(phone_number)\n contact_number = clean_contact_phone_number(clean_number)\n properties['contact_phone_number'] = contact_number\n properties['phone_number'] = clean_number\n\n social_scheme = column_mapping.get_value(\"social_scheme\", row)\n properties[\"socioeconomic_status\"] = clean_socioeconomic_status(social_scheme)\n\n occupation = column_mapping.get_value(\"occupation\", row)\n properties[\"occupation\"] = clean_occupation(occupation)\n\n marital_status = column_mapping.get_value(\"marital_status\", row)\n properties[\"marital_status\"] = clean_marital_status(marital_status)\n\n return properties\n\n\ndef get_occurrence_case_properties(column_mapping, row):\n initial_visit_date = column_mapping.get_value(\"initial_home_visit_date\", row)\n properties = {\n \"owner_id\": \"-\",\n \"current_episode_type\": \"confirmed_drtb\",\n \"initial_home_visit_status\": \"completed\" if initial_visit_date else None,\n \"ihv_date\": clean_date(initial_visit_date),\n \"ihv_by\": column_mapping.get_value(\"initial_home_visit_by\", row) if initial_visit_date else None,\n 'name': 'Occurrence #1',\n 'occurrence_episode_count': 1,\n }\n 
properties.update(get_disease_site_properties(column_mapping, row))\n properties.update(get_key_populations(column_mapping, row))\n\n return properties\n\n\ndef get_episode_case_properties(domain, column_mapping, city_constants, row):\n phi_name, phi_id = match_phi(domain, column_mapping.get_value(\"phi_name\", row))\n report_sending_date = column_mapping.get_value(\"report_sending_date\", row)\n report_sending_date = clean_date(report_sending_date)\n\n treatment_initiation_date = column_mapping.get_value(\"treatment_initiation_date\", row)\n treatment_initiation_date = clean_date(treatment_initiation_date)\n\n treatment_card_completed_date = column_mapping.get_value(\"registration_date\", row)\n treatment_card_completed_date = clean_date(treatment_card_completed_date)\n if not treatment_card_completed_date:\n treatment_card_completed_date = treatment_initiation_date\n\n drtb_center_name, drtb_center_id = get_drtb_center_location(domain, column_mapping, row, city_constants)\n\n properties = {\n \"owner_id\": \"-\",\n \"treatment_initiating_drtb_center_id\": drtb_center_id,\n \"episode_type\": \"confirmed_drtb\",\n \"episode_pending_registration\": \"no\",\n \"is_active\": \"yes\",\n \"diagnosing_facility_id\": phi_id,\n \"diagnosing_facility_name\": phi_name,\n \"treatment_initiation_date\": treatment_initiation_date,\n \"date_referral_to_drtb_center\": treatment_initiation_date,\n \"treatment_card_completed_date\": treatment_card_completed_date,\n \"nikshay_id\": column_mapping.get_value(\"nikshay_id\", row),\n \"manual_nikshay_id\": \"yes\",\n \"pmdt_tb_number\": column_mapping.get_value(\"drtb_number\", row),\n \"treatment_status_other\": column_mapping.get_value(\"reason_for_not_initiation_on_treatment\", row),\n \"treatment_outcome\": get_treatment_outcome(column_mapping, row),\n \"treatment_outcome_date\": clean_date(column_mapping.get_value(\"date_of_treatment_outcome\", row)),\n \"weight\": column_mapping.get_value(\"weight\", row),\n \"weight_band\": clean_weight_band(column_mapping.get_value(\"weight_band\", row)),\n \"height\": clean_height(column_mapping.get_value(\"height\", row)),\n \"treatment_regimen\": clean_treatment_regimen(column_mapping.get_value(\"treatment_regimen\", row)),\n \"regimen_change_history\": get_episode_regimen_change_history(\n column_mapping, row, treatment_initiation_date),\n \"patient_type_choice\": clean_patient_type(column_mapping.get_value(\"type_of_patient\", row)),\n \"adherence_schedule_id\": \"schedule_daily\",\n }\n\n # this code is specifically for Mehsana since we don't have a treatment status in Mumbai\n # need to update once we get Excel to figure out how to determine treatment initiating facility ID\n raw_treatment_status = column_mapping.get_value(\"treatment_status\", row)\n if raw_treatment_status:\n treatment_status_id = convert_treatment_status(raw_treatment_status)\n properties[\"treatment_status\"] = treatment_status_id\n if treatment_status_id not in (\"other\", \"\", None):\n properties[\"treatment_initiated\"] = \"yes_phi\"\n\n properties.update(get_selection_criteria_properties(column_mapping, row))\n if treatment_initiation_date:\n properties[\"treatment_initiated\"] = \"yes_phi\"\n if 'treatment_status' not in properties:\n properties[\"treatment_initiating_facility_id\"] = phi_id\n properties['treatment_status'] = 'initiated_second_line_treatment'\n\n properties.update(get_diagnosis_properties(column_mapping, domain, row))\n\n properties.update(get_reason_for_test_properties(column_mapping, row))\n\n ip_to_cp_date = clean_date(column_mapping.get_value(\"ip_to_cp_date\", row))\n 
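# IP = intensive phase, CP = continuation phase; a recorded transition date means the\n # continuation phase has already started\n 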
clean_date(column_mapping.get_value(\"ip_to_cp_date\", row))\n if ip_to_cp_date:\n properties.update({\n \"cp_initiated\": \"yes\",\n \"cp_initiation_date\": ip_to_cp_date,\n })\n\n if not properties.get(\"date_of_diagnosis\"):\n properties[\"date_of_diagnosis\"] = properties.get(\"treatment_initiation_date\")\n\n return properties\n\n\ndef get_reason_for_test_properties(column_mapping, row):\n value = column_mapping.get_value(\"reason_for_testing\", row)\n if not value:\n return {}\n clean_value = value.lower()\n\n rft_drtb_diagnosis_ext_dst_tmonth = None\n if isinstance(clean_value, (int, float, decimal.Decimal)):\n rft_drtb_diagnosis = \"extended_dst\"\n rft_drtb_diagnosis_ext_dst = \"3_monthly_culture_positives\"\n rft_drtb_diagnosis_ext_dst_tmonth = value\n else:\n try:\n rft_drtb_diagnosis, rft_drtb_diagnosis_ext_dst = {\n \"at diagnosis\": [\"mdr_at_diagnosis\", None],\n \"contact of mdr/rr tb\": [\"contact_of_mdr_rr\", None],\n \"follow up sm+ve at end of ip and cp\": [\"follow_up_sm_ve_ip\", None],\n \"private referral\": [\"private_referral\", None],\n \"discordance resolution\": [\"discordance_resolution\", None],\n \"mdr/rr at diagnosis\": [\"extended_dst\", \"mdr_rr_diagnosis\"],\n \"more than 4 months culture positive\": [\"extended_dst\", \"4mo_culture_positive\"],\n \"3 monthly, for persistent culture positive\": [\"extended_dst\", \"3_monthly_culture_positives\"],\n \"failure of mdr/rr-tb regimen\": [\"extended_dst\", \"mdr_rr_failure\"],\n \"culture reversion\": [\"extended_dst\", \"culture_reversion\"],\n \"recurrent case of second line treatment\": [\"extended_dst\", \"recurrent_second_line_treatment\"],\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Reason for Testing\")\n\n return {\n \"rft_general\": \"diagnosis_drtb\",\n \"rft_drtb_diagnosis\": rft_drtb_diagnosis,\n \"rft_drtb_diagnosis_ext_dst\": rft_drtb_diagnosis_ext_dst,\n \"rft_drtb_diagnosis_ext_dst_tmonth\": rft_drtb_diagnosis_ext_dst_tmonth,\n }\n\n\ndef get_diagnosis_properties(column_mapping, domain, row):\n properties = {}\n diagnosing_test = None\n if column_mapping.get_value(\"cbnaat_result\", row):\n diagnosing_test = get_cbnaat_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"lpa_rif_result\", row) or column_mapping.get_value(\"lpa_inh_result\", row):\n diagnosing_test = get_lpa_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"sl_lpa_result\", row):\n diagnosing_test = get_sl_lpa_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"culture_result\", row):\n diagnosing_test = get_culture_test_case_properties(domain, column_mapping, row)\n\n if diagnosing_test:\n properties[\"diagnosis_test_type_label\"] = diagnosing_test['test_type_label']\n properties[\"diagnosis_test_type_value\"] = diagnosing_test['test_type_value']\n properties[\"diagnosis_test_drug_resistance_list\"] = diagnosing_test['drug_resistance_list']\n properties[\"diagnosis_test_drug_sensitive_list\"] = diagnosing_test['drug_sensitive_list']\n properties[\"diagnosis_lab_facility_id\"] = diagnosing_test['testing_facility_id']\n properties[\"diagnosis_lab_facility_name\"] = diagnosing_test['testing_facility_name']\n properties[\"diagnosis_test_result_date\"] = diagnosing_test['date_reported']\n properties[\"diagnosis_test_specimen_date\"] = diagnosing_test['date_tested']\n properties[\"diagnosis_test_summary\"] = diagnosing_test['result_summary_display']\n properties[\"date_of_diagnosis\"] = 
\n\n\ndef get_diagnosis_properties(column_mapping, domain, row):\n properties = {}\n diagnosing_test = None\n # the diagnosing test is chosen by precedence: CBNAAT, then FL LPA, then SL LPA, then culture\n if column_mapping.get_value(\"cbnaat_result\", row):\n diagnosing_test = get_cbnaat_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"lpa_rif_result\", row) or column_mapping.get_value(\"lpa_inh_result\", row):\n diagnosing_test = get_lpa_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"sl_lpa_result\", row):\n diagnosing_test = get_sl_lpa_test_case_properties(domain, column_mapping, row)\n elif column_mapping.get_value(\"culture_result\", row):\n diagnosing_test = get_culture_test_case_properties(domain, column_mapping, row)\n\n if diagnosing_test:\n properties[\"diagnosis_test_type_label\"] = diagnosing_test['test_type_label']\n properties[\"diagnosis_test_type_value\"] = diagnosing_test['test_type_value']\n properties[\"diagnosis_test_drug_resistance_list\"] = diagnosing_test['drug_resistance_list']\n properties[\"diagnosis_test_drug_sensitive_list\"] = diagnosing_test['drug_sensitive_list']\n properties[\"diagnosis_lab_facility_id\"] = diagnosing_test['testing_facility_id']\n properties[\"diagnosis_lab_facility_name\"] = diagnosing_test['testing_facility_name']\n properties[\"diagnosis_test_result_date\"] = diagnosing_test['date_reported']\n properties[\"diagnosis_test_specimen_date\"] = diagnosing_test['date_tested']\n properties[\"diagnosis_test_summary\"] = diagnosing_test['result_summary_display']\n properties[\"date_of_diagnosis\"] = diagnosing_test['date_reported']\n\n # Some cases (~109) have no diagnosing test; those don't get a date of diagnosis here.\n # TODO: (WAITING) figure out how to set these properties based on other info\n return properties\n\n\ndef get_disease_site_properties(column_mapping, row):\n xlsx_value = column_mapping.get_value(\"site_of_disease\", row)\n if not xlsx_value:\n return {}\n value = xlsx_value.replace('EP ', 'extra pulmonary ').lower().strip()\n\n try:\n classification, site_choice, site_detail = {\n \"pulmonary\": [\"pulmonary\", None, None],\n \"extra pulmonary\": [\"extra_pulmonary\", None, None],\n \"extra pulmonary (lymph node)\": [\"extra_pulmonary\", \"lymph_node\", None],\n \"extra pulmonary (spine)\": [\"extra_pulmonary\", \"spine\", None],\n \"extra pulmonary (brain)\": [\"extra_pulmonary\", \"brain\", None],\n \"extra pulmonary (pleural effusion)\": [\"extra_pulmonary\", \"pleural_effusion\", None],\n \"extra pulmonary (abdominal)\": [\"extra_pulmonary\", \"abdominal\", None],\n }[value]\n except KeyError:\n match = re.match(r\"^.*\\((.*)\\)\", value)\n if \"extra_pulmonary\" not in value and not match:\n raise FieldValidationFailure(value, \"Site of Disease\")\n\n classification = \"extra_pulmonary\"\n site_choice = \"other\"\n site_detail = match.groups()[0]\n\n return {\n \"disease_classification\": classification,\n \"site_choice\": site_choice,\n \"site_detail\": site_detail,\n }\n\n\ndef get_key_populations(column_mapping, row):\n value = column_mapping.get_value(\"key_populations\", row)\n if not value:\n return {}\n clean_value = value.lower()\n try:\n key_populations, key_population_other_detail = {\n \"slum dweller\": [\"slum_dweller\", None],\n \"migrant\": [\"migrant\", None],\n \"contact of known tb patients\": [\"known_patient_contact\", None],\n \"refugee\": [\"refugee\", None],\n \"other (health care worker)\": [\"health_care_worker\", None],\n \"other (minor)\": [\"other\", \"minor\"],\n \"other (diabetic)\": [\"other\", \"diabetic\"],\n \"other (na)\": [None, None],\n \"na\": [None, None],\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Key Populations\")\n\n return {\n \"key_populations\": key_populations,\n \"key_population_other_detail\": key_population_other_detail,\n }\n\n\ndef get_disease_site_properties_for_person(column_mapping, row):\n props = get_disease_site_properties(column_mapping, row)\n return {\"current_{}\".format(k): v for k, v in six.iteritems(props)}\n\n\ndef get_prev_person_case_properties(property_list, case_properties):\n return {\n \"prev_{}\".format(p): case_properties[p] for p in property_list if p in case_properties\n }\n\n\ndef get_treatment_outcome(column_mapping, row):\n value = column_mapping.get_value(\"treatment_outcome\", row)\n if not value:\n return None\n clean_value = value.lower().strip().replace(' ', '_')\n if clean_value not in [\n \"cured\",\n \"died\",\n \"treatment_complete\",\n \"failure\",\n \"loss_to_follow_up\",\n \"regimen_changed\",\n \"pediatric_failure_to_respond\",\n \"not_evaluated\",\n \"treatment_failure_culture_non_reversion\",\n \"treatment_failure_culture_reversion\",\n \"treatment_failure_additional_drug_resistance\",\n \"treatment_failure_adverse_drug_reaction\",\n ]:\n raise FieldValidationFailure(value, \"treatment outcome\")\n return clean_value\n\n\ndef get_selection_criteria_properties(column_mapping, row):\n selection_criteria_value = column_mapping.get_value(\"mdr_selection_criteria\", row)\n if not selection_criteria_value:\n return {}\n 
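# NOTE: an unrecognized selection criteria value raises a bare KeyError from this lookup\n # (not a FieldValidationFailure); SELECTION_CRITERIA_MAP is defined near the top of the module\n 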
rft_drtb_diagnosis, rft_drtb_diagnosis_ext_dst = SELECTION_CRITERIA_MAP[selection_criteria_value]\n\n properties = {\n \"rft_general\": \"drtb_diagnosis\",\n }\n if rft_drtb_diagnosis:\n properties[\"rft_drtb_diagnosis\"] = rft_drtb_diagnosis\n if rft_drtb_diagnosis_ext_dst:\n properties[\"rft_drtb_diagnosis_ext_dst\"] = rft_drtb_diagnosis_ext_dst\n return properties\n\n\ndef get_cbnaat_test_resistance_properties(column_mapping, row):\n resistant = get_cbnaat_resistance(column_mapping, row)\n if resistant:\n return {\"drug_resistance_list\": \"r\"}\n elif (resistant is not None) and (not resistant):\n return {\"drug_sensitive_list\": \"r\"}\n else:\n return {}\n\n\ndef get_lpa_test_resistance_properties(column_mapping, row):\n drug_resistances = [\n (\"r\", clean_mumbai_lpa_resistance_value(column_mapping.get_value(\"lpa_rif_result\", row))),\n (\"h_inha\", clean_mumbai_lpa_resistance_value(column_mapping.get_value(\"lpa_inh_result\", row))),\n ]\n return {\n \"drug_sensitive_list\": \" \".join(\n [drug for drug, resistant in drug_resistances if (not resistant) and (resistant is not None)]),\n \"drug_resistance_list\": \" \".join([drug for drug, resistant in drug_resistances if resistant])\n }\n\n\ndef get_sl_lpa_test_resistance_properties(column_mapping, row):\n result = column_mapping.get_value(\"sl_lpa_result\", row)\n if result is None:\n return {}\n # strip whitespace once, so validation and lookup below see the same drug names\n drugs = [drug.strip() for drug in result.split(\",\")]\n drug_name_to_id = {\n DRUG_MAP[id][\"drug_name\"]: id for id in DRUG_MAP\n }\n for drug in drugs:\n if drug not in drug_name_to_id:\n raise FieldValidationFailure(result, \"SL LPA result\")\n properties = {\n \"drug_resistance_list\": \" \".join(filter(None, [drug_name_to_id[drug_name] for drug_name in drugs])),\n }\n return properties\n\n\ndef get_test_summary(properties):\n detected = None\n if properties.get('result') == 'tb_detected':\n detected = 'TB Detected'\n elif properties.get('result') == 'tb_not_detected':\n detected = 'TB Not Detected'\n\n drug_resistance_list = properties['drug_resistance_list'].split(' ') \\\n if properties.get('drug_resistance_list') else []\n drug_sensitive_list = properties['drug_sensitive_list'].split(' ') \\\n if properties.get('drug_sensitive_list') else []\n\n drug_output = []\n for drug_id in sorted(DRUG_MAP, key=lambda d: DRUG_MAP[d][\"sort_order\"]):\n if drug_id in drug_resistance_list:\n drug_output.append(\"{}: Res\".format(DRUG_MAP[drug_id]['drug_name']))\n elif drug_id in drug_sensitive_list:\n drug_output.append(\"{}: Sens\".format(DRUG_MAP[drug_id]['drug_name']))\n\n return '\\n'.join(filter(None, [detected] + drug_output))\n\n\ndef get_cbnaat_resistance(column_mapping, row):\n value = column_mapping.get_value(\"cbnaat_result\", row)\n if not value:\n return None\n clean_value = value.lower().strip()\n if clean_value not in [\"tb detected, rif resistance detected\", \"tb detected, rif sensitive\"]:\n raise FieldValidationFailure(value, \"cbnaat result\")\n return clean_value == \"tb detected, rif resistance detected\"\n\n\ndef clean_mumbai_lpa_resistance_value(value):\n return {\n None: None,\n \"Not tested\": None,\n \"R\": True,\n \"Resistant\": True,\n \"Sensitive\": False,\n \"S\": False,\n }[value]\n\n\ndef clean_sex(value):\n if not value:\n return None\n return {\n \"female\": \"female\",\n \"male\": \"male\",\n \"f\": \"female\",\n \"m\": \"male\",\n \"transgender\": \"transgender\"\n }[value.lower()]\n\n\ndef clean_age_entered(value):\n if not isinstance(value, (int, float, decimal.Decimal)):\n raise FieldValidationFailure(value, 
\"age\")\n return value\n\n\ndef calculate_dob(value):\n age = clean_age_entered(value)\n dob = datetime.date.today() - datetime.timedelta(days=age * 365)\n return str(dob)\n\n\ndef get_mehsana_resistance_properties(column_mapping, row):\n property_map = {\n \"Rif-Resi\": (\"r\", \"R: Res\"),\n \"Rif Resi+Levo Resi\": (\"r lfx\", \"R: Res\\nLFX: Res\"),\n \"Rif Resi+Levo Resi+K Resi\": (\"r lfx km\", \"R: Res\\nLFX: Res\\nKM: Res\"),\n \"Rif Resi+K Resi\": (\"r km\", \"R: Res\\nKM: Res\"),\n }\n dst_result_value = column_mapping.get_value(\"dst_result\", row)\n if dst_result_value:\n return {\n \"drug_resistance_list\": property_map[dst_result_value][0],\n \"result_summary_display\": property_map[dst_result_value][1]\n }\n else:\n return {}\n\n\ndef get_episode_regimen_change_history(column_mapping, row, episode_treatment_initiation_date):\n # TODO: This is odd Mehsana code\n # put_on_treatment = column_mapping.get_value(\"date_put_on_mdr_treatment\", row)\n # put_on_treatment = clean_date(put_on_treatment)\n # value = \"{}: MDR/RR\".format(episode_treatment_initiation_date)\n # if put_on_treatment:\n # value += \"\\n{}: {}\".format(\n # put_on_treatment,\n # column_mapping.get_value(\"type_of_treatment_initiated\", row)\n # )\n # return value\n current_treatment_regimen = column_mapping.get_value(\"treatment_regimen\", row)\n if current_treatment_regimen:\n return \"{}: {}\".format(episode_treatment_initiation_date, current_treatment_regimen)\n\n\ndef get_test_case_properties(domain, column_mapping, row, treatment_initiation_date):\n test_cases = []\n\n if column_mapping.get_value(\"cbnaat_result\", row):\n test_cases.append(get_cbnaat_test_case_properties(domain, column_mapping, row))\n elif column_mapping.get_value(\"testing_facility\", row):\n test_cases.append(get_mehsana_test_case_properties(domain, column_mapping, row))\n\n if column_mapping.get_value(\"lpa_rif_result\", row) or column_mapping.get_value(\"lpa_inh_result\", row):\n test_cases.append(get_lpa_test_case_properties(domain, column_mapping, row))\n if column_mapping.get_value(\"sl_lpa_result\", row):\n test_cases.append(get_sl_lpa_test_case_properties(domain, column_mapping, row))\n if column_mapping.get_value(\"culture_result\", row):\n test_cases.append(get_culture_test_case_properties(domain, column_mapping, row))\n dst_test_case_properties = get_dst_test_case_properties(column_mapping, row)\n if dst_test_case_properties:\n test_cases.append(dst_test_case_properties)\n\n test_cases.extend(get_follow_up_test_case_properties(domain, column_mapping, row, treatment_initiation_date))\n\n for t in test_cases:\n t['dataset'] = 'real'\n t['name'] = '{}-{}'.format(t.get('test_type_value'), t.get('date_reported'))\n\n return test_cases\n\n\ndef get_mehsana_test_case_properties(domain, column_mapping, row):\n facility_name, facility_id = match_facility(domain, column_mapping.get_value(\"testing_facility\", row))\n properties = {\n \"owner_id\": \"-\",\n \"date_reported\": column_mapping.get_value(\"report_sending_date\", row),\n \"testing_facility_name\": facility_name,\n \"testing_facility_id\": facility_id,\n }\n properties.update(get_selection_criteria_properties(column_mapping, row))\n properties.update(get_mehsana_resistance_properties(column_mapping, row))\n return properties\n\n\ndef get_cbnaat_test_case_properties(domain, column_mapping, row):\n cbnaat_lab_name, cbnaat_lab_id = match_facility(domain, column_mapping.get_value(\"cbnaat_lab\", row))\n date_reported = column_mapping.get_value(\"cbnaat_result_date\", row)\n if 
not date_reported:\n raise ValidationFailure(\"cbnaat result date required if result given\")\n\n properties = {\n \"owner_id\": \"-\",\n \"date_reported\": date_reported,\n \"testing_facility_name\": cbnaat_lab_name,\n \"testing_facility_id\": cbnaat_lab_id,\n \"lab_serial_number\": column_mapping.get_value(\"cbnaat_lab_number\", row),\n \"test_type_label\": \"CBNAAT\",\n \"test_type_value\": \"cbnaat\",\n \"date_tested\": clean_date(column_mapping.get_value(\"cbnaat_sample_date\", row)),\n \"result\": \"tb_not_detected\",\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n \"rft_general\": \"diagnosis_drtb\",\n }\n\n properties.update(get_cbnaat_test_resistance_properties(column_mapping, row))\n if get_cbnaat_resistance(column_mapping, row) is not None:\n properties['result'] = \"tb_detected\"\n properties['result_summary_display'] = get_test_summary(properties)\n return properties\n\n\ndef get_lpa_test_case_properties(domain, column_mapping, row):\n lpa_lab_name, lpa_lab_id = match_facility(domain, column_mapping.get_value(\"lpa_lab\", row))\n result_date = clean_date(column_mapping.get_value(\"lpa_result_date\", row))\n if not result_date:\n raise ValidationFailure(\"LPA result date required if result included\")\n\n properties = {\n \"owner_id\": \"-\",\n \"testing_facility_name\": lpa_lab_name,\n \"testing_facility_id\": lpa_lab_id,\n \"lab_serial_number\": column_mapping.get_value(\"lpa_lab_number\", row),\n \"test_type_label\": \"FL LPA\",\n \"test_type_value\": \"fl_line_probe_assay\",\n \"date_tested\": clean_date(column_mapping.get_value(\"lpa_sample_date\", row)),\n \"date_reported\": result_date,\n \"result\": \"tb_not_detected\",\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n \"rft_general\": \"diagnosis_drtb\",\n }\n\n properties.update(get_lpa_test_resistance_properties(column_mapping, row))\n if properties['drug_resistance_list']:\n properties['result'] = \"tb_detected\"\n properties['result_summary_display'] = get_test_summary(properties)\n return properties\n\n\ndef get_sl_lpa_test_case_properties(domain, column_mapping, row):\n sl_lpa_lab_name, sl_lpa_lab_id = match_facility(domain, column_mapping.get_value(\"sl_lpa_lab\", row))\n date_reported = clean_date(column_mapping.get_value(\"sl_lpa_result_date\", row))\n if not date_reported:\n raise ValidationFailure(\"SL LPA result date required if result included\")\n properties = {\n \"owner_id\": \"-\",\n \"testing_facility_name\": sl_lpa_lab_name,\n \"testing_facility_id\": sl_lpa_lab_id,\n \"lab_serial_number\": column_mapping.get_value(\"sl_lpa_lab_number\", row),\n \"test_type_label\": \"SL LPA\",\n \"test_type_value\": \"sl_line_probe_assay\",\n \"date_tested\": clean_date(column_mapping.get_value(\"sl_lpa_sample_date\", row)),\n \"date_reported\": date_reported,\n \"result\": \"tb_not_detected\",\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n \"rft_general\": \"diagnosis_drtb\",\n }\n\n properties.update(get_sl_lpa_test_resistance_properties(column_mapping, row))\n if properties['drug_resistance_list']:\n properties['result'] = \"tb_detected\"\n properties['result_summary_display'] = get_test_summary(properties)\n return properties\n\n\ndef get_culture_test_case_properties(domain, column_mapping, row):\n lab_name, lab_id = match_facility(domain, column_mapping.get_value(\"culture_lab\", row))\n culture_type = clean_culture_type(column_mapping.get_value(\"culture_type\", row))\n 
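# clean_culture_type() normalizes free text (e.g. \"Liquid\" -> \"lc\", \"LJ\" -> \"lj\"), and\n # get_culture_type_label() renders the display label \"Culture (LC)\" / \"Culture (LJ)\"\n 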
date_reported = clean_date(column_mapping.get_value(\"culture_result_date\", row))\n if not date_reported:\n raise ValidationFailure(\"Culture date reported required if result included\")\n\n properties = {\n \"owner_id\": \"-\",\n \"testing_facility_name\": lab_name,\n \"testing_facility_id\": lab_id,\n \"lab_serial_number\": column_mapping.get_value(\"culture_lab_number\", row),\n \"test_type_value\": \"culture\",\n \"date_tested\": clean_date(column_mapping.get_value(\"culture_sample_date\", row)),\n \"date_reported\": date_reported,\n \"culture_type\": culture_type,\n \"test_type_label\": get_culture_type_label(culture_type) or 'Culture',\n \"result\": clean_result(column_mapping.get_value(\"culture_result\", row)),\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n \"rft_general\": \"diagnosis_drtb\",\n }\n properties['result_summary_display'] = get_test_summary(properties)\n return properties\n\n\ndef clean_culture_type(value):\n if not value:\n return None\n clean_value = value.lower().strip()\n try:\n return {\n \"lc\": \"lc\",\n \"lj\": \"lj\",\n \"liquid\": \"lc\",\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Culture Type\")\n\n\ndef get_culture_type_label(culture_type):\n return {\n None: None,\n \"lc\": \"Culture (LC)\",\n \"lj\": \"Culture (LJ)\",\n }[culture_type]\n\n\ndef get_dst_test_case_properties(column_mapping, row):\n date_reported = clean_date(column_mapping.get_value(\"dst_result_date\", row))\n if date_reported:\n resistance_props = get_dst_test_resistance_properties(column_mapping, row)\n if resistance_props['drug_resistance_list'] or resistance_props['drug_sensitive_list']:\n properties = {\n \"owner_id\": \"-\",\n \"test_type_value\": \"dst\",\n \"test_type_label\": \"DST\",\n \"date_tested\": clean_date(column_mapping.get_value(\"dst_sample_date\", row)),\n \"date_reported\": date_reported,\n \"dst_test_type\": column_mapping.get_value(\"dst_type\", row),\n \"result\": \"tb_detected\",\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n \"rft_general\": \"diagnosis_drtb\",\n }\n properties.update(resistance_props)\n properties['result_summary_display'] = get_test_summary(properties)\n return properties\n return None\n\n\ndef get_dst_test_resistance_properties(column_mapping, row):\n resistant_drugs = []\n sensitive_drugs = []\n for drug_id in DRUG_MAP:\n try:\n value = column_mapping.get_value(drug_id, row)\n except KeyError:\n continue\n if value:\n sensitivity = convert_sensitivity(value)\n if sensitivity == \"sensitive\":\n sensitive_drugs.append(drug_id)\n elif sensitivity == \"resistant\":\n resistant_drugs.append(drug_id)\n\n return {\n \"drug_resistance_list\": \" \".join(resistant_drugs),\n \"drug_sensitive_list\": \" \".join(sensitive_drugs),\n }\n\n\ndef get_drug_resistance_case_properties(column_mapping, row, test_cases):\n # generate empty / unknown drug_resistance cases\n dr_cases = {}\n for drug_id in DRUG_MAP:\n dr_cases[drug_id] = {\n \"name\": drug_id,\n \"owner_id\": \"-\",\n \"drug_id\": drug_id,\n \"sort_order\": DRUG_MAP[drug_id][\"sort_order\"],\n \"sensitivity\": \"unknown\",\n }\n\n # Update data based on Mehsana drug resistance list\n drugs = get_mehsana_resistance_properties(column_mapping, row).get(\"drug_resistance_list\", [])\n if drugs:\n drugs = drugs.split(\" \")\n for drug_id in drugs:\n dr_cases[drug_id]['sensitivity'] = 'resistant'\n\n test_cases = sorted([t for t in test_cases if 'date_reported' in t], 
key=lambda t: t['date_reported'])\n for t in test_cases:\n drug_resistance_list = t['drug_resistance_list'] or []\n if drug_resistance_list:\n drug_resistance_list = drug_resistance_list.split(' ')\n drug_sensitive_list = t['drug_sensitive_list'] or []\n if drug_sensitive_list:\n drug_sensitive_list = drug_sensitive_list.split(' ')\n\n test_drugs = [(drug_id, \"resistant\") for drug_id in drug_resistance_list] + \\\n [(drug_id, \"sensitive\") for drug_id in drug_sensitive_list]\n for drug_id, sensitivity in test_drugs:\n dr_cases[drug_id]['test_type'] = t['test_type_value']\n dr_cases[drug_id]['test_type_label'] = t['test_type_label']\n dr_cases[drug_id]['result_date'] = t['date_reported']\n dr_cases[drug_id]['specimen_date'] = t['date_tested']\n dr_cases[drug_id]['sensitivity'] = sensitivity\n\n # add any resistance info not tied to a test\n for drug_id in DRUG_MAP:\n if dr_cases[drug_id]['sensitivity'] == 'unknown':\n try:\n value = column_mapping.get_value(drug_id, row)\n except KeyError:\n continue\n dr_cases[drug_id]['sensitivity'] = convert_sensitivity(value)\n\n return list(dr_cases.values())\n\n\ndef convert_sensitivity(sensitivity_value):\n if not sensitivity_value:\n return \"unknown\"\n return {\n \"sensitive\": \"sensitive\",\n \"resistant\": \"resistant\",\n \"resisant\": \"resistant\",\n \"resisitant\": \"resistant\",\n \"unknown\": \"unknown\",\n \"s\": \"sensitive\",\n \"r\": \"resistant\",\n \"conta\": \"unknown\",\n \"\": \"unknown\",\n }[sensitivity_value.lower().strip()]\n\n\ndef convert_treatment_status(status_in_xlsx):\n second_line = \"initiated_on_second_line_treatment\"\n first_line = \"initiated_first_line_treatment\"\n return {\n \"Mono H\": first_line,\n \"CAT I/II\": first_line,\n \"Cat IV\": second_line,\n \"Cat-iv\": second_line,\n \"Cat iv\": second_line,\n \"CAT IV\": second_line,\n \"CAT-IV\": second_line,\n \"CATIV\": second_line,\n \"Cat V\": second_line,\n \"Not initiated (reason remark)\": \"other\",\n }[status_in_xlsx]\n\n\ndef clean_patient_type(value):\n if not value:\n return None\n clean_value = value.lower().replace(' ', '_')\n try:\n return {\n \"new\": \"new\",\n \"recurrent\": \"recurrent\",\n \"treatment_after_failure\": \"treatment_after_failure\",\n \"treatment_after_ltfu\": \"treatment_after_lfu\",\n \"treatment_after_lfu\": \"treatment_after_lfu\",\n \"other_previously_treated\": \"other_previously_treated\",\n \"unknown\": \"unknown\",\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"type of patient\")\n\n\ndef get_follow_up_test_case_properties(domain, column_mapping, row, treatment_initiation_date):\n properties_list = []\n\n # Mehsana\n for follow_up in (3, 4, 5, 6, 9, 12, \"end\"):\n if column_mapping.get_value(\"month_{}_follow_up_send_date\".format(follow_up), row):\n properties = {\n \"owner_id\": \"-\",\n \"date_tested\": clean_date(\n column_mapping.get_value(\"month_{}_follow_up_send_date\".format(follow_up), row)),\n \"date_reported\": clean_date(\n column_mapping.get_value(\"month_{}_follow_up_result_date\".format(follow_up), row)),\n \"result\": clean_result(\n column_mapping.get_value(\"month_{}_follow_up_result\".format(follow_up), row)),\n \"test_type_value\": \"culture\",\n \"test_type_label\": \"Culture\",\n \"rft_general\": \"follow_up_drtb\",\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n }\n properties[\"rft_drtb_follow_up_treatment_month\"] = get_follow_up_month(\n follow_up, properties['date_tested'], treatment_initiation_date\n )\n 
properties[\"result_summary_display\"] = get_test_summary(properties)\n properties_list.append(properties)\n\n # Mumbai\n if hasattr(column_mapping, \"follow_up_culture_month_start\"):\n month = column_mapping.follow_up_culture_month_start\n while month <= 36:\n if month == 34 or month == 35:\n pass\n else:\n result = column_mapping.get_follow_up_culture_result(month, row)\n if result:\n date_reported = clean_date(column_mapping.get_follow_up_culture_date(month, row))\n lab_name, lab_id = match_facility(domain, column_mapping.get_follow_up_culture_lab(month, row))\n properties = {\n \"owner_id\": \"-\",\n \"test_type_value\": \"culture\",\n \"test_type_label\": \"Culture\",\n \"testing_facility_name\": lab_name,\n \"testing_facility_id\": lab_id,\n \"rft_general\": \"follow_up_drtb\",\n \"rft_drtb_follow_up_treatment_month\": month,\n \"date_reported\": date_reported,\n \"result\": clean_result(result),\n \"drug_resistance_list\": '',\n \"drug_sensitive_list\": '',\n \"result_recorded\": \"yes\",\n }\n properties[\"result_summary_display\"] = get_test_summary(properties)\n properties_list.append(properties)\n month += 1\n\n return properties_list\n\n\ndef get_follow_up_month(follow_up_month_identifier, date_tested, treatment_initiation_date):\n if isinstance(follow_up_month_identifier, int):\n return str(follow_up_month_identifier)\n else:\n return str(int(round((date_tested - treatment_initiation_date).days / 30.4)))\n\n\ndef get_secondary_owner_case_properties(domain, city_constants, column_mapping, row, district_id):\n drtb_hiv_name, drtb_hiv_id = get_drtb_hiv_location(domain, district_id)\n drtb_c_name, drtb_c_id = get_drtb_center_location(domain, column_mapping, row, city_constants)\n return [\n {\n \"secondary_owner_name\": drtb_c_name,\n \"secondary_owner_type\": \"drtb\",\n \"owner_id\": drtb_c_id,\n },\n {\n \"secondary_owner_name\": drtb_hiv_name,\n \"secondary_owner_type\": \"drtb-hiv\",\n \"owner_id\": drtb_hiv_id,\n }\n ]\n\n\ndef clean_diabetes_status(value):\n if not value:\n return None\n clean_value = value.lower().replace(' ', '_')\n try:\n return {\n \"diabetic\": \"diabetic\",\n \"positive\": \"diabetic\",\n \"non_diabetic\": \"non_diabetic\",\n \"negative\": \"non_diabetic\",\n \"unknown\": \"unknown\",\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Diabetes status\")\n\n\ndef clean_weight_band(value):\n if not value:\n return None\n try:\n return {\n \"Less than 16\": \"drtb_conventional_old_lt_16\",\n \"16-29\": \"drtb_conventional_16_29\",\n \"30-45\": \"drtb_conventional_30_45\",\n \"16-25\": \"drtb_conventional_old_16_25\",\n \"26-45\": \"drtb_conventional_old_26_45\",\n \"46-70\": \"drtb_conventional_old_46_70\",\n \"Above 70\": \"drtb_conventional_old_gt70\"\n }[value]\n except KeyError:\n raise FieldValidationFailure(value, \"Weight Band\")\n\n\ndef clean_height(value):\n if value is None:\n return None\n if re.match(\"[0-9]*\", str(value)):\n return value\n raise FieldValidationFailure(value, \"height\")\n\n\ndef clean_treatment_regimen(value):\n if not value:\n return None\n try:\n return {\n \"Regimen for XDR TB\": \"xdr\",\n \"Regimen for MDR/RR TB\": \"mdr_rr\",\n \"Modified Regimen for MDR/RR-TB + FQ/SLI resistance\": \"mdr_rr_fq_sli\",\n \"Regimen with New Drug for MDR-TB Regimen + FQ/SLI resistance\": \"new_drug_mdr_rr_fq_sli\",\n \"Regimen with New Drug for XDR-TB\": \"new_xdr\",\n \"Modified regimen for mixed pattern resistance\": \"mixed_pattern\",\n \"Regimen for INH mono/poly resistant TB\": \"inh_poly_mono\",\n 
\"Regimen with New Drug for failures of regimen for MDR TB\": \"new_fail_mdr\",\n }[value]\n except KeyError:\n raise FieldValidationFailure(value, \"Treatment Regimen\")\n\n\ndef clean_phone_number(value):\n \"\"\"\n Convert the phone number to the 10 digit format if possible, else return the misformated number\n \"\"\"\n if not value:\n return None\n\n if not isinstance(value, six.string_types + (int,)):\n raise FieldValidationFailure(value, \"phone number\")\n\n try:\n values = value.split(\"/\")\n value = values[0]\n except AttributeError:\n # This exception will be raised if value is an int.\n pass\n\n cleaned = re.sub('[^0-9]', '', str(value))\n\n if len(cleaned) == 12 and cleaned[:2] == \"91\":\n return cleaned[2:]\n elif len(cleaned) == 11 and cleaned[0] == \"0\":\n return cleaned[1:]\n else:\n return cleaned\n\n\ndef clean_contact_phone_number(clean_phone_number):\n \"\"\"\n :param clean_phone_number: A string returned by clean_phone_number()\n :return: The phone number in 12 digit format if clean_phone_number was 10 digits, otherwise None.\n \"\"\"\n if not clean_phone_number:\n return None\n elif len(clean_phone_number) == 10:\n return \"91\" + clean_phone_number\n else:\n return None\n\n\ndef _starts_with_any(value, strings):\n for s in strings:\n if value.startswith(s):\n return True\n return False\n\n\ndef clean_hiv_status(value):\n if not value:\n return None\n clean_value = value.lower().replace(' ', '_')\n try:\n return {\n \"reactive\": \"reactive\",\n \"non_reactive\": \"non_reactive\",\n \"positive\": \"reactive\",\n \"negative\": \"non_reactive\",\n \"unknown\": \"unknown\",\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"HIV status\")\n\n\ndef clean_socioeconomic_status(value):\n if value is None:\n return \"unknown\"\n return {\n \"bpl\": \"bpl\",\n \"apl\": \"apl\",\n \"unknown\": \"unknown\",\n }[value.lower()]\n\n\ndef clean_occupation(value):\n if not value:\n return None\n clean_value = value.lower().strip()\n try:\n return {\n \"office clerk\": \"office_clerk\",\n \"other craft or related trader or worker\": \"other_craft_and_related\",\n \"corporate manager\": \"corporate_manager\",\n \"legislators or senior official\": \"legislators_or_senior_official\",\n \"legistrator or senior official\": \"legislators_or_senior_official\",\n \"general manager\": \"general_manager\",\n \"other professional\": \"other_professional\",\n \"physical, mathematical and engineering science professional\": \"physical_mathematical_and_engineering\",\n \"subsistence agriculture or fishery worker\": \"subsistence_agriculture_fishery\",\n \"sales and services elementary occupation\": \"sales_and_services_elementary\",\n \"sales and service elementary occupation\": \"sales_and_services_elementary\",\n \"extraction and building trade worker\": \"extraction_and_building_trade\",\n \"model, sales person or demonstrator\": \"model_sales_persons_demonstrator\",\n \"laborer in mining, construction, manufacturing and transport\":\n \"mining_construction_manufacturing_transport\",\n \"labourer in mining, constuction, manufacturing and transport\":\n \"mining_construction_manufacturing_transport\",\n \"agriculture, fishery and related labor\": \"agriculture_fishery_and_related\",\n \"unidentifiable occupation or inadequate reporting\": \"occupation_unidentifiable\",\n \"workers reporting occupation unidentifiable or inadequately\": \"occupation_unidentifiable\",\n \"stationary plant and related operators\": \"stationary_plant_and_related\",\n \"teaching associate 
professional\": \"teaching_associate\",\n \"life sciences and health associate professional\": \"life_sciences_and_health_associate\",\n \"other associate professional\": \"other_associate\",\n \"customer services clerk\": \"customer_services_clerk\",\n \"life sciences and health professional\": \"life_sciences_and_health\",\n \"personal protective service provider\": \"person_protective_service_provider\",\n \"metal, machinery or related trade worker\": \"metal_machinery_and_related\",\n \"driver or mobile plant operator\": \"driver_and_mobile_plant_operator\",\n \"driver or mobile plan operator\": \"driver_and_mobile_plant_operator\",\n \"teaching professional\": \"teaching_professional\",\n \"no occupation reported\": \"no_occupation_reported\",\n \"machine operator or assembler\": \"machine_operator_or_assembler\",\n \"new worker seeking employment\": \"new_worker_seeking_employment\",\n \"precision, handicraft, printing or related trade worker\": \"precision_handicraft_printing_and_related\",\n \"market-oriented, skilled agriculture or fishery worker\": \"market_oriented_agriculture_fishery\",\n \"physical and engineering science associate professional\":\n \"physical_and_engineering_science_associate\",\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Occupation\")\n\n\ndef clean_marital_status(value):\n if not value:\n return None\n clean_value = value.lower().strip()\n try:\n return {\n \"unmarried\": \"unmarried\",\n \"single\": \"unmarried\",\n \"married\": \"married\",\n \"widowed\": \"widowed\",\n \"separated\": \"separated\",\n \"\": None,\n }[clean_value]\n except KeyError:\n raise FieldValidationFailure(value, \"Marital Status\")\n\n\ndef clean_result(value):\n value = value.lower().strip()\n return {\n None: NO_RESULT,\n NO_RESULT.lower(): NO_RESULT,\n NOT_DETECTED.lower(): NOT_DETECTED,\n DETECTED.lower(): DETECTED,\n \"sample rejected\": NO_RESULT,\n \"result awaited\": NO_RESULT,\n \"conta\": NO_RESULT,\n \"contaminated\": NO_RESULT,\n \"na\": NO_RESULT,\n \"neg\": NOT_DETECTED,\n \"negetive\": NOT_DETECTED,\n \"negative\": NOT_DETECTED,\n \"pos\": DETECTED,\n \"positive\": DETECTED,\n }[value]\n\n\ndef clean_drtb_type(value):\n if value is None:\n return \"unknown\"\n if value not in [\n \"mdr\",\n \"xdr\",\n \"rr\",\n \"pdr\",\n \"mr\",\n \"unknown\",\n ]:\n raise FieldValidationFailure(value, \"DRTB type\")\n return value\n\n\ndef result_label(result):\n if result == NO_RESULT:\n return \"Unknown\"\n elif result == DETECTED:\n return \"TB Detected\"\n elif result == NOT_DETECTED:\n return \"TB Not Detected\"\n else:\n raise Exception(\"Unexpected test result value\")\n\n\ndef clean_date(messy_date_string):\n if messy_date_string:\n if isinstance(messy_date_string, datetime.date):\n return messy_date_string\n if messy_date_string == \"?\" or not messy_date_string.strip():\n return None\n\n # The excel library we use should actually import dates correctly if the column format is date.\n raise Exception(\"Got a date like {}\".format(messy_date_string))\n\n # I think some columns are month/day/year and some are day/month/year\n # cleaned_datetime = parse(messy_date_string, dayfirst=False)\n # return cleaned_datetime.date()\n\n\ndef match_district(domain, xlsx_district_name):\n return match_location(domain, xlsx_district_name, \"dto\")\n\n\n@memoized\ndef match_location(domain, xlsx_name, location_type=None):\n \"\"\"\n Given location name taken from the spreadsheet, return the name and id of the matching location in HQ.\n \"\"\"\n if not 
xlsx_name:\n return None, None\n xlsx_name = xlsx_name.strip()\n\n default_query_kwargs = {\"domain\": domain}\n if location_type:\n default_query_kwargs[\"location_type__code\"] = location_type\n\n try:\n kwargs = {\"name__iexact\": xlsx_name}\n kwargs.update(default_query_kwargs)\n location = SQLLocation.active_objects.get(**kwargs)\n except SQLLocation.DoesNotExist:\n possible_matches = (SQLLocation.active_objects\n .filter(**default_query_kwargs)\n .filter(models.Q(name__icontains=xlsx_name)))\n if len(possible_matches) == 1:\n location = possible_matches[0]\n elif len(possible_matches) > 1:\n raise ValidationFailure(\"Multiple location matches for {}\".format(xlsx_name))\n else:\n raise ValidationFailure(\"No location matches for {}\".format(xlsx_name))\n return location.name, location.location_id\n\n\n@memoized\ndef match_location_by_site_code(domain, site_code):\n \"\"\"\n Given a site code, return the name and id of the matching location in HQ.\n \"\"\"\n if not site_code:\n return None, None\n site_code = site_code.strip()\n\n location = SQLLocation.objects.get_or_None(domain=domain, site_code=site_code)\n if location:\n return location.name, location.location_id\n else:\n raise ValidationFailure(\"No location matches for {}\".format(site_code))\n\n\ndef match_facility(domain, xlsx_facility_name):\n \"\"\"\n Given lab facility name taken from the spreadsheet, return the name and id of the matching location in HQ.\n \"\"\"\n if not xlsx_facility_name:\n return None, None\n elif \"other\" in xlsx_facility_name.lower():\n return xlsx_facility_name, None\n else:\n # this is really ugly but some rows have a lab code\n # our site codes are prepended with cdst and cbnaat\n try:\n return match_location(domain, xlsx_facility_name, location_type=\"cdst\")\n except ValidationFailure:\n try:\n cbnaat_site_code = \"cbnaat_\" + xlsx_facility_name.strip().replace('-', '_').lower()\n return match_location_by_site_code(domain, cbnaat_site_code)\n except ValidationFailure:\n cdst_site_code = \"cdst_\" + xlsx_facility_name.strip().replace('-', '_').lower()\n return match_location_by_site_code(domain, cdst_site_code)\n\n\ndef match_phi(domain, xlsx_phi_name):\n location_name, location_id = match_location(domain, xlsx_phi_name, \"phi\")\n if not location_id:\n raise ValidationFailure(\"A valid phi is required\")\n return location_name, location_id\n\n\ndef get_tu(domain, phi_id):\n if not phi_id:\n return None, None\n phi = SQLLocation.active_objects.get(domain=domain, location_id=phi_id)\n return phi.parent.name, phi.parent.location_id\n\n\ndef get_drtb_hiv_location(domain, district_id):\n if not district_id:\n return None, None\n drtb_hiv = SQLLocation.active_objects.get(\n domain=domain,\n parent__location_id=district_id,\n location_type__code=\"drtb-hiv\"\n )\n return drtb_hiv.name, drtb_hiv.location_id\n\n\ndef get_drtb_center_location(domain, column_mapping, row, city_constants):\n if column_mapping.get_value(\"drtb_center_code\", row):\n value = column_mapping.get_value(\"drtb_center_code\", row)\n site_code = \"drtb_\" + value.strip().lower().replace('-', '_')\n return match_location_by_site_code(domain, site_code)\n else:\n return city_constants.drtb_center_name, city_constants.drtb_center_id\n\n\ndef get_drtb_type(drug_resistance):\n \"\"\"\n This function expects a list of dictionary objects specifying the drug_id, drug_class and the\n sensitivity of each drug\n\n it calculates drtb_type using the following rule (from the app):\n xdr:\n count(/data/drug_resistance/item[sensitivity = 
'resistant'][drug_id = 'r']) > 0\n        and count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'h_inha' or drug_id = 'h_katg']) > 0\n        and count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_class = 'fq']) > 0\n        and count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_class = 'slid']) > 0\n\n    mdr:\n        count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'r']) > 0\n        and count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'h_inha' or drug_id = 'h_katg']) > 0\n\n    rr:\n        count(/data/drug_resistance/item[sensitivity = 'resistant']) = 1\n        and /data/drug_resistance/item[sensitivity = 'resistant']/drug_id = 'r'\n\n    pdr:\n        count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_class = 'first_line']) > 1\n        and (count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'r'])\n        + count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'h_inha' or drug_id = 'h_katg'])) < 2\n\n    mr:\n        count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_class = 'first_line']) = 1 and\n        count(/data/drug_resistance/item[sensitivity = 'resistant'][drug_id = 'r']) = 0\n    \"\"\"\n    def is_resistant_drug(drug_id):\n        return len([\n            d for d in drug_resistance\n            if d[DRUG_ID] == drug_id and d[SENSITIVITY] == RESISTANT\n        ]) > 0\n\n    def is_resistant_class(drug_class):\n        return len([\n            d for d in drug_resistance\n            if d[DRUG_CLASS] == drug_class and d[SENSITIVITY] == RESISTANT\n        ]) > 0\n\n    if (\n        is_resistant_drug(DRUG_R)\n        and (is_resistant_drug(DRUG_H_INHA) or is_resistant_drug(DRUG_H_KATG))\n        and is_resistant_class(DRUG_CLASS_FQ)\n        and is_resistant_class(DRUG_CLASS_SLID)\n    ):\n        return 'xdr'\n    elif (\n        is_resistant_drug(DRUG_R)\n        and (is_resistant_drug(DRUG_H_INHA) or is_resistant_drug(DRUG_H_KATG))\n    ):\n        return 'mdr'\n    elif (\n        len([d for d in drug_resistance if d[SENSITIVITY] == RESISTANT]) == 1\n        and is_resistant_drug(DRUG_R)\n    ):\n        return 'rr'\n    elif (\n        len([d for d in drug_resistance\n             if d[SENSITIVITY] == RESISTANT and d[DRUG_CLASS] == DRUG_CLASS_FIRST]) > 1\n        and is_resistant_drug(DRUG_R)\n        + (is_resistant_drug(DRUG_H_INHA) or is_resistant_drug(DRUG_H_KATG)) < 2\n    ):\n        return 'pdr'\n    elif (\n        len([d for d in drug_resistance\n             if d[SENSITIVITY] == RESISTANT and d[DRUG_CLASS] == DRUG_CLASS_FIRST]) == 1\n        and not is_resistant_drug(DRUG_R)\n    ):\n        return 'mr'\n    else:\n        return 'unknown'\n\n\nclass _PersonIdGenerator(object):\n    \"\"\"\n    Person cases in eNikshay require unique, human-readable ids.\n    These ids are generated by combining a user id, device id, and serial count for the user/device pair.\n\n    This script is its own \"device\", and in --commit runs, the serial count is maintained in a database to ensure\n    that the next number is always unique.\n    \"\"\"\n\n    dry_run_counter = 0\n\n    @classmethod\n    def _next_serial_count(cls, commit):\n        if commit:\n            return MigratedDRTBCaseCounter.get_next_counter()\n        else:\n            cls.dry_run_counter += 1\n            return cls.dry_run_counter\n\n    @classmethod\n    def _next_serial_count_compressed(cls, commit):\n        return compress_nikshay_id(cls._next_serial_count(commit), 2)\n\n    @classmethod\n    def get_id_issuer_body(cls, user):\n        id_issuer_body = user.user_data['id_issuer_body']\n        assert id_issuer_body\n        return id_issuer_body\n\n    @classmethod\n    def get_user(cls, domain, phi_id):\n        users = get_users_by_location_id(domain, phi_id)\n        for user in sorted(users, key=lambda u: u.username):\n            if user.user_data['id_issuer_body']:\n                return user\n        raise Exception(\"No suitable user found at location {}\".format(phi_id))\n\n    @classmethod\n    def id_device_body(cls, user, commit):\n        script_device_id = \"drtb-case-import-script\"\n        update_device_meta(user, script_device_id)\n        if commit:\n            user.save()\n        index = [x.device_id for x in user.devices].index(script_device_id)\n        return compress_nikshay_id(index + 1, 0)\n\n    @classmethod\n    def generate_person_id_flat(cls, domain, phi_id, commit):\n        \"\"\"\n        Generate a flat person id. If commit is False, this id will only be unique within this run of the\n        management command, it won't be unique between runs.\n        \"\"\"\n        user = cls.get_user(domain, phi_id)\n        return (\n            cls.get_id_issuer_body(user) +\n            cls.id_device_body(user, commit) +\n            cls._next_serial_count_compressed(commit)\n        )\n\n\nImportFormat = namedtuple(\"ImportFormat\", \"column_mapping constants header_rows\")\n\n\nclass Command(BaseCommand):\n\n    MEHSANA_2017 = \"mehsana2017\"\n    MEHSANA_2016 = \"mehsana2016\"\n    MUMBAI = \"mumbai\"\n    FORMATS = [MEHSANA_2016, MEHSANA_2017, MUMBAI]\n\n    def add_arguments(self, parser):\n        parser.add_argument(\n            'domain',\n            help=\"the domain to create the new cases in\"\n        )\n        parser.add_argument(\n            'excel_file_path',\n            help=\"a path to an excel file to be imported\"\n        )\n        parser.add_argument(\n            'format',\n            help=\"the format of the given excel file. Options are: {}.\".format(\", \".join(self.FORMATS)),\n        )\n        parser.add_argument(\n            '--commit',\n            action='store_true',\n            help=\"actually create the cases. Without this flag, it's a dry run.\"\n        )\n\n    def handle(self, domain, excel_file_path, format, **options):\n        migration_id = self.generate_id()\n        self.log_meta_info(migration_id, options['commit'])\n        import_format = self.get_import_format(format)\n        case_factory = CaseFactory(domain)\n\n        import_log_file_name = \"drtb-import-{}.csv\".format(migration_id)\n        bad_rows_file_name = \"{}-bad-rows.csv\".format(migration_id)\n        rows_with_unknown_exceptions = 0\n\n        with open_any_workbook(excel_file_path) as workbook, \\\n                open(bad_rows_file_name, \"w\") as bad_rows_file, \\\n                open(import_log_file_name, \"w\") as import_log_file:\n\n            import_log_writer = csv.writer(import_log_file)\n            bad_rows_file_writer = csv.writer(bad_rows_file)\n            import_log_writer.writerow([\"row\", \"case_ids\", \"exception\"])\n\n            for i, row in enumerate(workbook.worksheets[0].iter_rows()):\n                if i < import_format.header_rows:\n                    # Skip the header rows\n                    if i == 0:\n                        extra_cols = [\"original import row number\", \"error message\"]\n                    else:\n                        extra_cols = [None, None]\n                    bad_rows_file_writer.writerow(extra_cols + [six.text_type(c.value).encode('utf-8') for c in row])\n                    continue\n\n                row_contains_data = any(cell.value for cell in row)\n                if not row_contains_data:\n                    continue\n\n                try:\n                    import_format.column_mapping.check_for_required_fields(row)\n                    case_structures = get_case_structures_from_row(\n                        options['commit'], domain, migration_id, import_format.column_mapping,\n                        import_format.constants, row\n                    )\n                    import_log_writer.writerow([i, \",\".join(x.case_id for x in case_structures)])\n                    logger.info(\"Creating cases for row {}\".format(i))\n\n                    if options['commit']:\n                        case_factory.create_or_update_cases(case_structures)\n                except Exception as e:\n                    logger.info(\"Creating case structures for row {} failed\".format(i))\n                    if isinstance(e, ValidationFailure):\n                        exception_as_string = e.message\n                    else:\n                        rows_with_unknown_exceptions += 1\n                        exception_as_string = traceback.format_exc()\n                    import_log_writer.writerow([i, \"\", exception_as_string])\n                    bad_rows_file_writer.writerow([i, exception_as_string] +\n                                                  
[six.text_type(c.value).encode('utf-8') for c in row])\n\n print(\"{} rows with unknown exceptions\".format(rows_with_unknown_exceptions))\n\n def generate_id(self):\n now = datetime.datetime.now()\n # YYYY-MM-DD_HHMMSS\n format = \"%Y-%m-%d_%H%M%S\"\n return now.strftime(format)\n\n @staticmethod\n def log_meta_info(migration_id, commit):\n logger.info(\"Starting DRTB import with id {}\".format(migration_id))\n if commit:\n logger.info(\"This is a REAL RUN\")\n else:\n logger.info(\"This is a dry run\")\n\n @classmethod\n def get_import_format(cls, format_string):\n if format_string == cls.MEHSANA_2016:\n return ImportFormat(\n Mehsana2016ColumnMapping,\n MehsanaConstants,\n 1,\n )\n elif format_string == cls.MEHSANA_2017:\n return ImportFormat(\n Mehsana2017ColumnMapping,\n MehsanaConstants,\n 1,\n )\n elif format_string == cls.MUMBAI:\n return ImportFormat(\n MumbaiColumnMapping,\n MumbaiConstants,\n 2,\n )\n else:\n raise Exception(\"Invalid format. Options are: {}.\".format(\", \".join(cls.FORMATS)))\n", "sub_path": "custom/enikshay/two_b_datamigration/management/commands/import_drtb_cases.py", "file_name": "import_drtb_cases.py", "file_ext": "py", "file_size_in_byte": 85885, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "logging.getLogger", "line_number": 130, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 707, "usage_type": "call"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_PERSON", "line_number": 722, "usage_type": "argument"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_OCCURRENCE", "line_number": 725, "usage_type": "argument"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_EPISODE", "line_number": 728, "usage_type": "argument"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_DRUG_RESISTANCE", "line_number": 731, "usage_type": "argument"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_TEST", "line_number": 736, "usage_type": "argument"}, {"api_name": "custom.enikshay.case_utils.CASE_TYPE_SECONDARY_OWNER", "line_number": 741, "usage_type": "argument"}, {"api_name": "custom.enikshay.user_setup.join_chunked", "line_number": 757, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 776, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 778, "usage_type": "call"}, {"api_name": "casexml.apps.case.mock.CaseIndex", "line_number": 794, "usage_type": "call"}, {"api_name": "casexml.apps.case.const.CASE_INDEX_EXTENSION", "line_number": 797, "usage_type": "name"}, {"api_name": "casexml.apps.case.mock.CaseStructure", "line_number": 800, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 963, "usage_type": "attribute"}, {"api_name": "re.match", "line_number": 1040, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 1083, "usage_type": "call"}, {"api_name": "six.moves.filter", "line_number": 1166, "usage_type": "call"}, {"api_name": "six.moves.filter", "line_number": 1190, "usage_type": "call"}, {"api_name": "decimal.Decimal", "line_number": 1227, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 1234, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 1234, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 1234, "usage_type": "call"}, {"api_name": "re.match", "line_number": 1710, "usage_type": "call"}, {"api_name": "six.string_types", "line_number": 1740, "usage_type": "attribute"}, {"api_name": "re.sub", "line_number": 1750, 
"usage_type": "call"}, {"api_name": "datetime.date", "line_number": 1920, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects.get", "line_number": 1953, "usage_type": "call"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects", "line_number": 1953, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 1953, "usage_type": "name"}, {"api_name": "corehq.apps.locations.models.SQLLocation.DoesNotExist", "line_number": 1954, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 1954, "usage_type": "name"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects.filter", "line_number": 1955, "usage_type": "call"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects", "line_number": 1955, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 1955, "usage_type": "name"}, {"api_name": "django.db.models.Q", "line_number": 1957, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 1957, "usage_type": "name"}, {"api_name": "memoized.memoized", "line_number": 1937, "usage_type": "name"}, {"api_name": "corehq.apps.locations.models.SQLLocation.objects.get_or_None", "line_number": 1976, "usage_type": "call"}, {"api_name": "corehq.apps.locations.models.SQLLocation.objects", "line_number": 1976, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 1976, "usage_type": "name"}, {"api_name": "memoized.memoized", "line_number": 1967, "usage_type": "name"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects.get", "line_number": 2015, "usage_type": "call"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects", "line_number": 2015, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 2015, "usage_type": "name"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects.get", "line_number": 2022, "usage_type": "call"}, {"api_name": "corehq.apps.locations.models.SQLLocation.active_objects", "line_number": 2022, "usage_type": "attribute"}, {"api_name": "corehq.apps.locations.models.SQLLocation", "line_number": 2022, "usage_type": "name"}, {"api_name": "custom.enikshay.two_b_datamigration.models.MigratedDRTBCaseCounter.get_next_counter", "line_number": 2128, "usage_type": "call"}, {"api_name": "custom.enikshay.two_b_datamigration.models.MigratedDRTBCaseCounter", "line_number": 2128, "usage_type": "name"}, {"api_name": "custom.enikshay.user_setup.compress_nikshay_id", "line_number": 2135, "usage_type": "call"}, {"api_name": "corehq.apps.locations.dbaccessors.get_users_by_location_id", "line_number": 2145, "usage_type": "call"}, {"api_name": "corehq.apps.users.util.update_device_meta", "line_number": 2154, "usage_type": "call"}, {"api_name": "custom.enikshay.user_setup.compress_nikshay_id", "line_number": 2158, "usage_type": "call"}, {"api_name": "collections.namedtuple", "line_number": 2174, "usage_type": "call"}, {"api_name": "django.core.management.BaseCommand", "line_number": 2177, "usage_type": "name"}, {"api_name": "casexml.apps.case.mock.CaseFactory", "line_number": 2207, "usage_type": "call"}, {"api_name": "corehq.util.workbook_reading.open_any_workbook", "line_number": 2213, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 2217, "usage_type": "call"}, {"api_name": "csv.writer", "line_number": 2218, 
"usage_type": "call"}, {"api_name": "six.text_type", "line_number": 2228, "usage_type": "call"}, {"api_name": "traceback.format_exc", "line_number": 2252, "usage_type": "call"}, {"api_name": "six.text_type", "line_number": 2255, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 2260, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 2260, "usage_type": "attribute"}]} +{"seq_id": "146187648", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\n\nfrom django.core.urlresolvers import reverse\nfrom django.views.generic import DetailView, ListView, RedirectView, UpdateView, TemplateView\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\nfrom .models import User, Proveedor\nfrom .formularios import FormularioProveedor,FormularioUpdateCliente, FormularioCliente, FormularioUpdateProveedor, ImagenFormSet, FormularioClienteAplicaProveedor,FormularioAltaAdministrador,FormularioUpdateAdministrador\nfrom allauth.account.views import SignupView\nfrom braces.views import LoginRequiredMixin\nfrom django.views.generic.edit import FormView\nfrom django.contrib.auth.mixins import UserPassesTestMixin\nfrom construct_star.trabajos.models import TipoTrabajo, TipoPresupuestoPrevio, TipoSinPresupuestoPrevio\nfrom django.http import HttpResponse\n\n#para redirigir views\nfrom django.shortcuts import redirect\n#importar tickets para trabajar con ellos\nfrom construct_star.tickets.models import TicketBase\nfrom django.contrib.sessions.models import Session\nfrom construct_star.mensajes.models import Mensaje,MensajeAnonymous\nfrom .formularios import FormContacto\n\ndef cambiar_estado_proveedor(request):\n cambiar_a = request.POST.get('nuevo_status')\n id = request.POST.get('id')\n id = int(id)\n retorno = \"\"\n proveedor = None\n try:\n proveedor = Proveedor.objects.get(id=id)\n except:\n retorno = 'Error al buscar el proveedor'\n return HttpResponse(retorno)\n #si se encontro el usuario cambiar status\n if cambiar_a in 'ocupado':\n print('cambiar el valor a ocupado')\n #avisar que el usuario ahora esta ocupado y no quiere ser asignado a nuevos trabajos\n proveedor.buscando_nuevas_oportunidades_trabajo = False\n proveedor.save()\n retorno = 'ocupado'\n elif cambiar_a in 'buscando_oportunidades':\n print('cambiar el valor a desocupado')\n proveedor.buscando_nuevas_oportunidades_trabajo = True\n proveedor.save()\n #avisar que el usuario ahora se encuentra desocupado y desea ser asignado a nuevos trabajos\n retorno = 'desocupado'\n\n return HttpResponse(retorno)\n\n\n## PAGINAS OVERRIDE DE ERRORES ##\n# 404 page not found\ndef pagina_no_encontrada(request):\n response = render_to_response('404.html', {},\n context_instance=RequestContext(request))\n response.status_code = 404\n return response\n\n# 500 server error\ndef error_servidor(request):\n response = render_to_response('500.html', {},\n context_instance=RequestContext(request))\n response.status_code = 500\n return response\n\n# Cliente views de allauth, modificaciones minimas sobre update. 
CBV sobre registro.\nclass UserDetailView(LoginRequiredMixin, DetailView):\n model = User\n\n # These next two lines tell the view to index lookups by username\n slug_field = 'username'\n slug_url_kwarg = 'username'\n template_name = 'account/perfil.html'\n\n\n\n\nclass UserRedirectView(LoginRequiredMixin, RedirectView):\n #permanent = False\n\n #def get_redirect_url(self):\n #return reverse('users:detail',\n #kwargs={'username': self.request.user.username})\n\n #ver el tipo de usuario para mostrar la pagina de inicio deseada\n def get_redirect_url(self):\n usuario = self.request.user\n #es administrador\n if (usuario.is_staff):\n return reverse('inicio_admin')\n #es cliente\n elif(not(usuario.esProveedor) and not(usuario.is_staff)):\n return reverse('inicio_cliente')\n #es proveedor\n elif(usuario.esProveedor and (usuario.has_perm('users.es_proveedor'))):\n return reverse('proveedor_main')\n #es proveedor pero aun no esta autorizado\n else:\n return reverse('inicio_cliente')\n\nclass UserUpdateView(LoginRequiredMixin, UpdateView):\n #fields = ['first_name','last_name', 'sexo','fecha_nacimiento','foto_perfil','ubicacion_departamento',]\n\n # we already imported User in the view code above, remember?\n model = User\n form_class = FormularioUpdateCliente\n template_name = 'account/cliente_update.html'\n\n # send the user back to their own page after a successful update\n def get_success_url(self):\n return reverse('users:detail',\n kwargs={'username': self.request.user.username})\n\n def get_object(self):\n # Only get the User record for the user making the request\n return User.objects.get(username=self.request.user.username)\n\n def get_context_data(self, **kwargs):\n ret = super(UserUpdateView, self).get_context_data(**kwargs)\n cliente = self.get_object()\n if self.request.POST:\n ret['form'] = FormularioUpdateCliente(self.request.POST, self.request.FILES, instance=cliente, kwargs={'nombre':self.request.user.first_name})\n else:\n ret['form'] = FormularioUpdateCliente(instance=cliente)\n ret.update(self.kwargs)\n return ret\n\n def form_valid(self, form):\n user = self.request.user\n form.save(user)\n return redirect(to='home')\n\n\n\nclienteupdateview = UserUpdateView.as_view()\n\nclass UserListView(UserPassesTestMixin,LoginRequiredMixin, ListView):\n model = User\n # These next two lines tell the view to index lookups by username\n slug_field = 'username'\n slug_url_kwarg = 'username'\n login_url = 'users:404'\n redirect_field_name = 'no_autorizado'\n\n\n def test_func(self):\n return self.request.user.is_staff\n\nclass ProveedorRegistroView(SignupView):\n template_name = 'account/form_proveedor.html'\n form_class = FormularioProveedor\n redirect_field_name = 'next'\n view_name = 'registroproveedor'\n success_url = None\n\n def get_context_data(self, **kwargs):\n ret = super(ProveedorRegistroView, self).get_context_data(**kwargs)\n if self.request.POST:\n formset = ImagenFormSet(self.request.POST, self.request.FILES)\n ret['imagenes'] = formset\n else:\n formset = ImagenFormSet()\n ret['imagenes'] = formset\n ret.update(self.kwargs)\n return ret\n\n def form_invalid(self, form):\n context = self.get_context_data()\n context['errors'] = form.errors\n return render_to_response(self.template_name, context, context_instance=RequestContext(self.request))\n\n #cuando el formulario es valido\n def form_valid(self, form):\n context = self.get_context_data()\n formset = context['imagenes']\n #ver si no es valido el formulario de\n if not(formset.is_valid()):\n return 
render_to_response(self.template_name, self.get_context_data())\n\n #self.user = form.save(self.request)\n\n return super().form_valid(form)\n\n\nregistroproveedor = ProveedorRegistroView.as_view()\n\n\n#View para el formulario de registro de usuarios clientes\nclass ClienteRegistroView(SignupView):\n\n template_name = 'account/form_cliente.html'\n form_class = FormularioCliente\n redirect_field_name = 'next'\n view_name = 'registrocliente'\n success_url = None\n\n def get_context_data(self, **kwargs):\n ret = super(ClienteRegistroView, self).get_context_data(**kwargs)\n ret.update(self.kwargs)\n return ret\n\nregistrocliente = ClienteRegistroView.as_view()\n\n\nclass ClienteAplicaProveedor(UserPassesTestMixin, LoginRequiredMixin, FormView):\n #Primer mixins de UserPassesText y Access\n #Django 1.9 mixins, creados para uso en Class-Based Views\n #test_func override para el UserPassesText\n #login_url variable para pagina de redirect cuando test no es pasado\n\n template_name = 'account/cliente_aplica_proveedor.html'\n form_class = FormularioClienteAplicaProveedor\n view_name = 'clienteaplicaproveedor'\n success_url = 'account/gracias.html'\n login_url = 'errorAplicaProveedor'\n redirect_field_name = 'esperarValidacinCuenta'\n\n def test_func(self):\n return not self.request.user.esProveedor\n\n\n def get_context_data(self, **kwargs):\n ret = super(ClienteAplicaProveedor, self).get_context_data(**kwargs)\n if self.request.POST:\n formset = ImagenFormSet(self.request.POST, self.request.FILES)\n ret['imagenes'] = formset\n else:\n formset = ImagenFormSet()\n ret['imagenes'] = formset\n ret.update(self.kwargs)\n return ret\n\n def form_invalid(self, form):\n context = self.get_context_data()\n context['errors'] = form.errors\n return render_to_response(self.template_name, context, context_instance=RequestContext(self.request))\n\n def form_valid(self, form):\n tipoUser = self.request.user\n data = form.cleaned_data\n context = self.get_context_data()\n formset = context['imagenes']\n if not (tipoUser.id == None):\n #tipo_contenido = ContentType.objects.get_for_model(Proveedor)\n #permiso = Permission.objects.get(content_type=tipo_contenido, codename='es_proveedor')\n aplicaP = Proveedor()\n context = self.get_context_data()\n formset = context['imagenes']\n if formset.is_valid():\n tipoUser.esProveedor = True\n tipoUser.save()\n # pasar valores de usuario a proveedor\n aplicaP.get_valores_user(tipoUser)\n aplicaP.owner = tipoUser\n aplicaP.tiene_empresa = data['tiene_empresa']\n aplicaP.telefono = data['telefono']\n aplicaP.descripcion_forma_trabajar = data['descripcion_forma_trabajar']\n aplicaP.save()\n aplicaP.ubicaciones_servicios = data['ubicaciones_servicios']\n aplicaP.oficios = data['oficios']\n aplicaP.direccion = data['direccion']\n aplicaP.fines_de_semana = data['fines_de_semana']\n aplicaP.save()\n aplicaP.subir_imagenes_trabajo(self.request.FILES)\n #aplicaP.user_permissions.add(permiso)\n aplicaP.save()\n #generar nuevo ticket para la habilitacion de nuevo usuario proveedor\n #como asignar a super user????? 
pregunta para nico\n            nuevoTicket = TicketBase()\n            #Generate the ticket and save it\n            nuevoTicket.generar_ticket_cliente_aplica_proveedor(\"admin\",aplicaP,3,\"Nuevo proveedor pidiendo para registrarse al sistema\")\n            return redirect(to='esperarValidacinCuenta')\n\n        return render_to_response(self.template_name, self.get_context_data())\n\nclienteaplicaproveedor = ClienteAplicaProveedor.as_view()\n\n\n# Forms for viewing and editing provider information\n\nclass ProveedorDetailView(UserPassesTestMixin, LoginRequiredMixin, DetailView):\n    model = Proveedor\n    template_name = \"account/proveedor_perfil.html\"\n    # These next two lines tell the view to index lookups by username\n    slug_field = 'username'\n    slug_url_kwarg = 'username'\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        return self.request.user.has_perm('users.es_proveedor')\n\n\n\n\nclass ProveedorRedirectView(UserPassesTestMixin, LoginRequiredMixin, RedirectView):\n    permanent = False\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        return self.request.user.has_perm('users.es_proveedor')\n\n    def get_redirect_url(self):\n        return reverse('users:proveedor_actualizar',\n                       kwargs={'username': self.request.user.username})\n\n\nclass ProveedorUpdateView(UserPassesTestMixin, LoginRequiredMixin, UpdateView):\n\n    model = Proveedor\n    form_class = FormularioUpdateProveedor\n    template_name = \"account/proveedor_update.html\"\n    login_url = 'errorAplicaProveedor'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        return self.request.user.has_perm('users.es_proveedor') or self.request.user.has_perm('users.necesita_arreglar_datos')\n\n    # send the user back to their own page after a successful update\n    def get_success_url(self):\n        return reverse('proveedor_perfil',\n                       kwargs={'username': self.request.user.username})\n\n    def get_object(self, queryset=None):\n        # Only get the User record for the user making the request\n        return Proveedor.objects.get(username=self.request.user.username)\n\n\n    def get_context_data(self, **kwargs):\n        ret = super(ProveedorUpdateView, self).get_context_data(**kwargs)\n        proveedor = self.get_object()\n        if self.request.POST:\n            if self.request.POST.get('objectId') != '':\n                ret['form'] = FormularioUpdateProveedor(self.request.POST, instance=proveedor)\n                ret['ticket'] = self.request.POST.get('objectId')\n            ret['imagenes'] = ImagenFormSet(self.request.POST, self.request.FILES, instance=proveedor)\n        else:\n            ret['form'] = FormularioUpdateProveedor(instance=proveedor)\n            ret['imagenes'] = ImagenFormSet(instance=proveedor)\n            if self.request.GET.get('idTicket') is not None:\n                ret['form'] = FormularioUpdateProveedor(instance=proveedor)\n                ret['ticket'] = self.request.GET.get('idTicket')\n        ret.update(self.kwargs)\n        return ret\n\n    def form_invalid(self, form):\n        context = self.get_context_data()\n        context['errors'] = form.errors\n        return render_to_response(self.template_name, context, context_instance=RequestContext(self.request))\n\n    #if the general form is valid\n    def form_valid(self, form):\n        tipoUser = self.request.user\n        data = form.cleaned_data\n        context = self.get_context_data()\n        formset = context['imagenes']\n        #check that the user is actually logged into the system\n        if tipoUser.id is not None:\n            aplicaP = Proveedor()\n            tipoUser.sexo = data['sexo']\n            tipoUser.ubicacion_departamento = data['ubicacion_departamento']\n            #save the changes to the user\n            tipoUser.save()\n            # copy the user's values over to the 
proveedor\n aplicaP.get_valores_user(tipoUser)\n aplicaP.first_name = data['nombre']\n aplicaP.last_name = data['apellido']\n aplicaP.owner = tipoUser\n aplicaP.tiene_empresa = data['tiene_empresa']\n aplicaP.foto_perfil = data['foto_perfil']\n aplicaP.telefono = data['telefono']\n aplicaP.fecha_nacimiento = data['fecha_nacimiento']\n aplicaP.descripcion_forma_trabajar = data['descripcion_forma_trabajar']\n aplicaP.ubicaciones_servicios = data['ubicaciones_servicios']\n aplicaP.oficios = data['oficios']\n aplicaP.direccion = data['direccion']\n aplicaP.fines_de_semana = data['fines_de_semana']\n if formset.is_valid():\n formset.instance = self.object\n formset.save()\n aplicaP.save()\n if self.request.POST.get('objectId') != '':\n try:\n t = TicketBase.objects.get(id=self.request.POST.get('objectId'))\n return redirect(to='tickets:ticket_expandido', pk=t.id)\n except:\n return redirect(to='home')\n else:\n context['formset_errors'] = formset.errors\n data['formset_errors'] = formset.errors\n\n print('formset errors are: '+ str(formset.errors))\n return render_to_response(self.template_name, context)\n\n\nclass ProveedorMainView(UserPassesTestMixin,ListView):\n\n model = Proveedor\n template_name = \"pages/proveedor_main.html\"\n redirect_field_name = \"next\"\n login_url = 'users:404'\n redirect_field_name = 'no_autorizado'\n\n\n def test_func(self):\n return self.request.user.has_perm('users.es_proveedor')\n\n def get_context_data(self, **kwargs):\n context = super(ProveedorMainView, self).get_context_data(**kwargs)\n context['tiene_mensajes'] = False\n try:\n msgs = Mensaje.objects.filter(recipiente_id=self.request.user.id, status=2).latest(field_name='fecha')\n print(\"bool: \" + str(msgs))\n if msgs:\n context['tiene_mensajes'] = True\n context['mensajes_todos'] = Mensaje.objects.filter(recipiente_id=self.request.user.id).order_by('-fecha')[:10]\n except Exception as e:\n print(str(e))\n\n #tickets iniciados por usuario proveedor, ordenados por fecha\n tickets = TicketBase.objects.order_by('creado').filter(iniciado_por_id=self.request.user.id,status=7).order_by('-id').values()\n context['tickets_iniciados_por_usuario'] = tickets\n\n #retorna la cantidad de acciones pendientes para el usuario\n cantAccionesPendientes = len(tickets) # + len(trabajos_con_acciones_pendientes) -- Descomentar para sumar la otra lista cuando este pronto el query\n\n trabajos = TipoTrabajo.objects.order_by('id').filter(iniciado_por_id=self.request.user.id).order_by('-id').values()\n context['trabajos_iniciados_por_usuario'] = trabajos\n\n context['cantAccionesPendientes'] = cantAccionesPendientes\n from itertools import chain\n try:\n trabajos_conpp = TipoPresupuestoPrevio.objects.filter(proveedores_asignados_prepresupuesto__in=[self.request.user]).order_by('-id')\\\n .exclude(status=6)\n trabajos_sinpp = TipoSinPresupuestoPrevio.objects.filter(proveedores_a_ser_seleccionado__in=[self.request.user]).order_by('-id')\\\n .exclude(status=5)\n\n lst_trabajos_proveedor = list(chain(trabajos_sinpp, trabajos_conpp))\n except:\n lst_trabajos_proveedor = None\n context['lst_trabajos_proveedor'] = lst_trabajos_proveedor\n try:\n lst_trabajos1 = TipoPresupuestoPrevio.objects.filter(iniciado_por__id=self.request.user.id, es_borrador=False).exclude(status=6).order_by('-id')\n lst_trabajos2 = TipoSinPresupuestoPrevio.objects.filter(iniciado_por__id=self.request.user.id, es_borrador=False).exclude(status=5).order_by('-id')\n lst_trabajos = list(chain(lst_trabajos1, lst_trabajos2))\n except:\n print(\"listado vacio...\")\n 
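# if either jobs query above raises, fall back to None (no jobs to show)\n            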
lst_trabajos = None\n\n context['lst_trabajos'] = lst_trabajos\n # Setting cookie con session framework diciendolo client-side que fue visitado\n if self.request.session.get('visitado') == '1':\n context['visitado'] = True\n return context\n else:\n self.request.session['visitado'] = '1'\n return context\n\nproveedor_main = ProveedorMainView.as_view()\n\n#cargar datos a pagina de inicio para los administradores\nclass InicioAdminView(UserPassesTestMixin,LoginRequiredMixin,ListView):\n template_name = 'pages/admin_main.html'\n model = TicketBase\n view_name = 'inicioadmin'\n success_url = None\n login_url = 'users:404'\n redirect_field_name = 'no_autorizado'\n\n def test_func(self):\n return self.request.user.is_staff\n\n def get_context_data(self, **kwargs):\n context = super(InicioAdminView,self).get_context_data()\n\n #Mensajes anonimos para administradores\n context['tiene_mensajes'] = False\n try:\n msgs = MensajeAnonymous.objects.filter(status=1).latest(field_name='fecha')\n if msgs:\n context['tiene_mensajes'] = True\n context['mensajes_todos'] = MensajeAnonymous.objects.filter(status=1).order_by('-fecha')[:10]\n except Exception as e:\n print(\"Exception desde InicioAdminView: \" + str(e))\n #get tickets no iniciados (cerrados) -- Tickets Nuevos\n tickets_no_iniciados = TicketBase.objects.filter(status=5).order_by('-id')\n context['nuevosTickets'] = tickets_no_iniciados\n #get tickets a revisar por cambios -- Cambios desde el cliente\n tickets_a_revisar = TicketBase.objects.filter(status=6).order_by('-id')\n context['ticketsConCambios'] = tickets_a_revisar\n #get los distintos tipos de trabajo en proceso para retornarlos\n lst_trabajos_en_curso_sinpp = list(TipoSinPresupuestoPrevio.objects.filter(status=4).order_by('-id'))\n lst_trabajos_en_curso_conpp = list(TipoPresupuestoPrevio.objects.filter(status=5).order_by('-id'))\n context['trabajos_en_curso'] = lst_trabajos_en_curso_conpp + lst_trabajos_en_curso_sinpp\n\n # Setting cookie con session framework diciendolo client-side que fue visitado\n if self.request.session.get('visitado') == '1':\n context['visitado'] = True\n return context\n else:\n self.request.session['visitado'] = '1'\n return context\n\n\ninicioadmin = InicioAdminView.as_view()\n\n\n#cargar datos a pagina de inicio para los clientes\nclass InicioClienteView(LoginRequiredMixin,ListView):\n template_name = 'pages/cliente_main.html'\n model = TicketBase\n view_name = 'iniciocliente'\n success_url = None\n\n def get_context_data(self, **kwargs):\n cantAccionesPendientes = 0\n context = super(InicioClienteView,self).get_context_data()\n #get tickets que estan esperando acciones desde el cliente (status = 7)\n try:\n tickets_esperando_cambios_cliente = TicketBase.objects.filter(status=7, iniciado_por_id=self.request.user.id)\n except:\n tickets_esperando_cambios_cliente = None\n context['tiene_mensajes'] = False\n try:\n msgs = Mensaje.objects.filter(recipiente_id=self.request.user.id, status=1).latest(field_name='fecha')\n if msgs:\n context['tiene_mensajes'] = True\n context['mensajes_todos'] = Mensaje.objects.filter(recipiente_id=self.request.user.id).order_by('-fecha')[:10]\n except Exception as e:\n print(\"Exception desde InicioClienteView: \" +str(e))\n\n #try:\n # lst_trabajos = TipoTrabajo.objects.filter(iniciado_por_id=self.request.user.id,es_borrador=False)\n try:\n from itertools import chain\n\n trabajos_conpp = TipoPresupuestoPrevio.objects.filter(\n iniciado_por_id=self.request.user.id, es_borrador=False).order_by('-id') \\\n .exclude(status=6)\n 
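# repeat the filter for the client's jobs that have no up-front quote\n            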
trabajos_sinpp = TipoSinPresupuestoPrevio.objects.filter(\n                iniciado_por_id=self.request.user.id, es_borrador=False).order_by('-id') \\\n                .exclude(status=5)\n\n            lst_trabajos = list(chain(trabajos_sinpp, trabajos_conpp))\n            context['lst_trabajos'] = lst_trabajos\n\n        except:\n            lst_trabajos = None\n\n\n        #look up draft jobs so they can be added to the list of jobs with pending actions\n        try:\n            lst_trabajos_accioens_pendientes = TipoTrabajo.objects.filter(es_borrador=True,iniciado_por=self.request.user)\n        except:\n            # default to an empty list so the len() call below cannot fail\n            lst_trabajos_accioens_pendientes = []\n\n        context['lst_trabajos'] = lst_trabajos\n        context['tarbajosAccionesPendientes'] = lst_trabajos_accioens_pendientes\n        context['ticketsAccionesPendientes'] = tickets_esperando_cambios_cliente\n        #return the jobs the client currently has in progress\n\n        #return the context so the lists can be loaded\n        cantAccionesPendientes = len(tickets_esperando_cambios_cliente or []) #+ len(trabajos_con_acciones_pendientes) -- uncomment to add the other list once its query is ready\n\n        #add to the pending-action count\n        cantAccionesPendientes += len(context['tarbajosAccionesPendientes'])\n        context['cantAccionesPendientes'] = cantAccionesPendientes\n        # Setting cookie con session framework diciendolo client-side que fue visitado\n        if self.request.session.get('visitado') == '1':\n            context['visitado'] = True\n            return context\n        else:\n            self.request.session['visitado'] = '1'\n            return context\n\n\niniciocliente = InicioClienteView.as_view()\n\n\nclass HomeRedirectView(RedirectView):\n    def get_redirect_url(self):\n        usuario = self.request.user\n        #not logged in\n        if not usuario.is_authenticated():\n            return reverse('inicio_anonymous')\n        #is an administrator\n        elif (usuario.is_staff):\n            return reverse('inicio_admin')\n        #is a client\n        elif(not(usuario.esProveedor) and not(usuario.is_staff)):\n            return reverse('inicio_cliente')\n        #is an authorized provider\n        elif(usuario.esProveedor and (usuario.has_perm('users.es_proveedor'))):\n            return reverse('proveedor_main')\n        #is a provider but not yet authorized\n        elif(usuario.esProveedor and not usuario.has_perm('users.es_proveedor')):\n            return reverse('inicio_cliente')\n        else:\n            return reverse('home')\n\nhome_view = HomeRedirectView.as_view()\n\ndef anonymous_view(request):\n    response = render_to_response('pages/home.html', {},\n                                  context_instance=RequestContext(request))\n    return response\n\n\nclass AdminListaNegraConfianza(UserPassesTestMixin,LoginRequiredMixin,TemplateView):\n    model = User\n    template_name = 'pages/gestion_lista_negra_confianza.html'\n    view_name = 'gestion_lista_negra_confianza'\n    login_url = 'users:404'\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        return str(self.kwargs['pk']) == str(self.request.user.id)\n\n    def get_context_data(self, **kwargs):\n        context = super(AdminListaNegraConfianza, self).get_context_data()\n        context['lista_proveedores_negra'] = self.request.user.lista_negra.all()\n        context['lista_proveedores_confianza'] = self.request.user.lista_confianza.all()\n        return context\n\n    def post(self, request, *args, **kwargs):\n        if request.POST:\n            if '_borrar_proveedor_lista_negra' in request.POST:\n                # Remove the selected providers from the blacklist via the expert method\n                proveedor_ids = request.POST.getlist('proveedores_lst_negra[]')\n                cliente = User.objects.get(id=self.request.user.id)\n                cliente.borrar_lista_negra(proveedor_ids)\n                try:\n                    c = Proveedor.objects.get(id=self.request.user.id)\n                except:\n                    c = User.objects.get(id=self.request.user.id)\n                if c.__class__ 
is Proveedor:\n return redirect(to='proveedor_main')\n else:\n return redirect(to='inicio_cliente')\n elif '_borrar_proveedor_lista_confianza' in request.POST:\n # Borrar proveedores seleccionados de lista confianza con metodo experto\n proveedor_ids = request.POST.getlist('proveedores_lst_confianza[]')\n cliente = User.objects.get(id=self.request.user.id)\n cliente.borrar_lista_confianza(proveedor_ids)\n try:\n c = Proveedor.objects.get(id=self.request.user.id)\n except:\n c = User.objects.get(id=self.request.user.id)\n if c.__class__ is Proveedor:\n return redirect(to='proveedor_main')\n else:\n return redirect(to='inicio_cliente')\n else:\n return redirect(to='home')\n\nlistaconfianzanegra = AdminListaNegraConfianza.as_view()\n\n\nclass TerminarCuentaView(LoginRequiredMixin,TemplateView):\n model = User\n template_name = 'account/terminar_cuenta.html'\n view_name = 'terminar_cuenta'\n\n def get_context_data(self, **kwargs):\n context = super(TerminarCuentaView, self).get_context_data()\n try:\n c = Proveedor.objects.get(id=self.request.user.id)\n context['usuario'] = c\n except:\n c = User.objects.get(id=self.request.user.id)\n context['usuario'] = c\n return context\n\n def post(self, request, *args, **kwargs):\n if request.POST:\n if '_terminar_cuenta' in request.POST:\n try:\n user = Proveedor.objects.get(id=self.request.user.id)\n #Boramos todos los datos encoded en las variables de session por seguridad y limpieza antes del logout\n [s.delete() for s in Session.objects.all() if str(s.get_decoded().get('_auth_user_id')) == str(user.id)]\n user.is_active = False\n user.save()\n except:\n user = User.objects.get(id=self.request.user.id)\n [s.delete() for s in Session.objects.all() if str(s.get_decoded().get('_auth_user_id')) == str(user.id)]\n user.is_active = False\n user.save()\n return redirect(to='home')\n elif '_cancelar_terminacion' in request.POST:\n try:\n c = Proveedor.objects.get(id=self.request.user.id)\n except:\n c = User.objects.get(id=self.request.user.id)\n if c.__class__ is Proveedor:\n return redirect(to='proveedor_main')\n else:\n return redirect(to='inicio_cliente')\n else:\n return redirect(to='home')\n\nterminar_cuenta = TerminarCuentaView.as_view()\n\n\ndef contacto(request):\n response = render_to_response('pages/contacto.html', {},\n context_instance=RequestContext(request))\n return response\n\n\n#clase utilizada por administradores para dar de alta a usuarios del tipo Administrador\nclass AgregarAdminsitrador(FormView):\n form_class = FormularioAltaAdministrador\n view_name = 'agregaradministrador'\n template_name = 'account/agregar_admin.html'\n\n def form_valid(self, form):\n form.save()\n return redirect(to='users:listado_admin')\n\nagregaradministrador = AgregarAdminsitrador.as_view()\n\n\nclass ListadoAdministradores(UserPassesTestMixin,TemplateView):\n model = User\n view_name = 'listadoadministradores'\n template_name = 'account/listado_administrador.html'\n login_url = 'users:404'\n redirect_field_name = 'no_autorizado'\n\n def test_func(self):\n return self.request.user.is_superuser\n\n def get_context_data(self, **kwargs):\n context = super(ListadoAdministradores, self).get_context_data(**kwargs)\n #buscar usuarios administradores\n try:\n lst_administradores = User.objects.filter(is_staff=True)\n except Exception as e:\n print(e)\n\n context['lst_administradores'] = lst_administradores\n return context\n\nlistadoadministradores = ListadoAdministradores.as_view()\n\n\n\n#habilitar o deshabilitar cuenta\ndef cambiar_estado_cuenta(request):\n 
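# expects POST params 'accion' ('habilitar' or 'deshabilitar') and 'id'; replies with a\n    # plain-text status string, and never toggles the 'admin' account\n    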
user_accion = request.POST.get('accion')\n    user_id = request.POST.get('id')\n    retorno = \"\"\n    try:\n        u = User.objects.get(id=user_id)\n    except Exception as e:\n        print(e)\n        u = None\n\n    #enable the account\n    if u is not None and user_accion == 'habilitar' and u.username != 'admin':\n        u.is_active = True\n        u.save()\n        retorno = \"habilitado\"\n    #disable the account\n    if u is not None and user_accion == 'deshabilitar' and u.username != 'admin':\n        u.is_active = False\n        u.save()\n        retorno = \"deshabilitado\"\n\n    return HttpResponse(retorno)\n\n\n\nclass AdminUpdateView(UserPassesTestMixin,LoginRequiredMixin, UpdateView):\n    #fields = ['first_name','last_name', 'sexo','fecha_nacimiento','foto_perfil','ubicacion_departamento',]\n\n    # we already imported User in the view code above, remember?\n    model = User\n    form_class = FormularioUpdateAdministrador\n    template_name = 'account/update_admin.html'\n    login_url = \"users:404\"\n    redirect_field_name = 'no_autorizado'\n\n    def test_func(self):\n        try:\n            t = User.objects.get(id=self.kwargs['pk'])\n            return t.is_staff and self.request.user.is_superuser\n        except:\n            return False\n        #return self.kwargs['pk'] == self.request.user.id\n\n    # send the user back to the admin list after a successful update\n    def get_success_url(self):\n        # get_success_url must return a URL string, not a response object\n        return reverse('users:listado_admin')\n\n    def get_object(self):\n        return User.objects.get(id=self.kwargs['pk'])\n\n    def form_valid(self, form):\n        user = self.get_object()\n        form.save(user)\n        return redirect(to='users:listado_admin')\n\n\nudpateadmin = AdminUpdateView.as_view()\n\n\n#contact view\nclass ViewContacto(FormView):\n    form_class = FormContacto\n    template_name = 'pages/pagina_contacto.html'\n\n    def get_context_data(self, **kwargs):\n        context = super(ViewContacto, self).get_context_data(**kwargs)\n        return context\n\n    def form_valid(self, form):\n        form.enviar_email()\n        return redirect(to='gracias_por_contactarnos')\n\nviewcontacto = ViewContacto.as_view()\n\n\ndef concurrency_error(request):\n    response = render_to_response('pages/concurrency_error.html', {},\n                                  context_instance=RequestContext(request))\n    return response\n", "sub_path": "construct_star/users/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 33378, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "models.Proveedor.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 32, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 35, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 50, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 56, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 57, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 63, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 64, "usage_type": "call"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 69, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 69, "usage_type": "name"}, {"api_name": "models.User", "line_number": 70, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 80, "usage_type": "name"}, {"api_name": "django.views.generic.RedirectView", "line_number": 80, "usage_type": 
"name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 92, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 95, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 98, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 101, "usage_type": "call"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 103, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 103, "usage_type": "name"}, {"api_name": "models.User", "line_number": 107, "usage_type": "name"}, {"api_name": "formularios.FormularioUpdateCliente", "line_number": 108, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 113, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 118, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 118, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 118, "usage_type": "name"}, {"api_name": "formularios.FormularioUpdateCliente", "line_number": 124, "usage_type": "call"}, {"api_name": "formularios.FormularioUpdateCliente", "line_number": 126, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 133, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 139, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 139, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 139, "usage_type": "name"}, {"api_name": "models.User", "line_number": 140, "usage_type": "name"}, {"api_name": "allauth.account.views.SignupView", "line_number": 151, "usage_type": "name"}, {"api_name": "formularios.FormularioProveedor", "line_number": 153, "usage_type": "name"}, {"api_name": "formularios.ImagenFormSet", "line_number": 161, "usage_type": "call"}, {"api_name": "formularios.ImagenFormSet", "line_number": 164, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 172, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 172, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 180, "usage_type": "call"}, {"api_name": "allauth.account.views.SignupView", "line_number": 191, "usage_type": "name"}, {"api_name": "formularios.FormularioCliente", "line_number": 194, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 207, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 207, "usage_type": "name"}, {"api_name": "django.views.generic.edit.FormView", "line_number": 207, "usage_type": "name"}, {"api_name": "formularios.FormularioClienteAplicaProveedor", "line_number": 214, "usage_type": "name"}, {"api_name": "formularios.ImagenFormSet", "line_number": 227, "usage_type": "call"}, {"api_name": "formularios.ImagenFormSet", "line_number": 230, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 238, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 238, "usage_type": "call"}, {"api_name": "models.Proveedor", "line_number": 248, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 271, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 274, "usage_type": "call"}, {"api_name": 
"django.shortcuts.render_to_response", "line_number": 276, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 283, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 283, "usage_type": "name"}, {"api_name": "django.views.generic.DetailView", "line_number": 283, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 284, "usage_type": "name"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 298, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 298, "usage_type": "name"}, {"api_name": "django.views.generic.RedirectView", "line_number": 298, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 307, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 311, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 311, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 311, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 313, "usage_type": "name"}, {"api_name": "formularios.FormularioUpdateProveedor", "line_number": 314, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 324, "usage_type": "call"}, {"api_name": "models.Proveedor.objects.get", "line_number": 329, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 329, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 329, "usage_type": "name"}, {"api_name": "formularios.FormularioUpdateProveedor", "line_number": 337, "usage_type": "call"}, {"api_name": "formularios.ImagenFormSet", "line_number": 339, "usage_type": "call"}, {"api_name": "formularios.FormularioUpdateProveedor", "line_number": 341, "usage_type": "call"}, {"api_name": "formularios.ImagenFormSet", "line_number": 342, "usage_type": "call"}, {"api_name": "formularios.FormularioUpdateProveedor", "line_number": 344, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 352, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 352, "usage_type": "call"}, {"api_name": "models.Proveedor", "line_number": 362, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects.get", "line_number": 387, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects", "line_number": 387, "usage_type": "attribute"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 387, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 388, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 390, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 396, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 399, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 399, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 401, "usage_type": "name"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects.filter", "line_number": 415, "usage_type": "call"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects", "line_number": 415, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.Mensaje", "line_number": 415, "usage_type": "name"}, {"api_name": 
"construct_star.mensajes.models.Mensaje.objects.filter", "line_number": 419, "usage_type": "call"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects", "line_number": 419, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.Mensaje", "line_number": 419, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase.objects.order_by", "line_number": 424, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects", "line_number": 424, "usage_type": "attribute"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 424, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo.objects.order_by", "line_number": 430, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo.objects", "line_number": 430, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo", "line_number": 430, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects.filter", "line_number": 436, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects", "line_number": 436, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio", "line_number": 436, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects.filter", "line_number": 438, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects", "line_number": 438, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio", "line_number": 438, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 441, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects.filter", "line_number": 446, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects", "line_number": 446, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio", "line_number": 446, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects.filter", "line_number": 447, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects", "line_number": 447, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio", "line_number": 447, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 448, "usage_type": "call"}, {"api_name": "{'chain': 'itertools.chain'}.as_view", "line_number": 462, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 465, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 465, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 465, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 467, "usage_type": "name"}, {"api_name": "construct_star.mensajes.models.MensajeAnonymous.objects.filter", "line_number": 482, "usage_type": "call"}, {"api_name": "construct_star.mensajes.models.MensajeAnonymous.objects", "line_number": 482, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.MensajeAnonymous", "line_number": 482, "usage_type": "name"}, {"api_name": "construct_star.mensajes.models.MensajeAnonymous.objects.filter", "line_number": 485, "usage_type": "call"}, 
{"api_name": "construct_star.mensajes.models.MensajeAnonymous.objects", "line_number": 485, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.MensajeAnonymous", "line_number": 485, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase.objects.filter", "line_number": 489, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects", "line_number": 489, "usage_type": "attribute"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 489, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase.objects.filter", "line_number": 492, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects", "line_number": 492, "usage_type": "attribute"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 492, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects.filter", "line_number": 495, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects", "line_number": 495, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio", "line_number": 495, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects.filter", "line_number": 496, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects", "line_number": 496, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio", "line_number": 496, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 512, "usage_type": "name"}, {"api_name": "django.views.generic.ListView", "line_number": 512, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 514, "usage_type": "name"}, {"api_name": "construct_star.tickets.models.TicketBase.objects.filter", "line_number": 523, "usage_type": "call"}, {"api_name": "construct_star.tickets.models.TicketBase.objects", "line_number": 523, "usage_type": "attribute"}, {"api_name": "construct_star.tickets.models.TicketBase", "line_number": 523, "usage_type": "name"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects.filter", "line_number": 528, "usage_type": "call"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects", "line_number": 528, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.Mensaje", "line_number": 528, "usage_type": "name"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects.filter", "line_number": 531, "usage_type": "call"}, {"api_name": "construct_star.mensajes.models.Mensaje.objects", "line_number": 531, "usage_type": "attribute"}, {"api_name": "construct_star.mensajes.models.Mensaje", "line_number": 531, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects.filter", "line_number": 540, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio.objects", "line_number": 540, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoPresupuestoPrevio", "line_number": 540, "usage_type": "name"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects.filter", "line_number": 543, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoSinPresupuestoPrevio.objects", "line_number": 543, "usage_type": "attribute"}, {"api_name": 
"construct_star.trabajos.models.TipoSinPresupuestoPrevio", "line_number": 543, "usage_type": "name"}, {"api_name": "itertools.chain", "line_number": 547, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo.objects.filter", "line_number": 556, "usage_type": "call"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo.objects", "line_number": 556, "usage_type": "attribute"}, {"api_name": "construct_star.trabajos.models.TipoTrabajo", "line_number": 556, "usage_type": "name"}, {"api_name": "{'chain': 'itertools.chain'}.as_view", "line_number": 578, "usage_type": "call"}, {"api_name": "django.views.generic.RedirectView", "line_number": 581, "usage_type": "name"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 586, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 589, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 592, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 595, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 598, "usage_type": "call"}, {"api_name": "django.core.urlresolvers.reverse", "line_number": 600, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 605, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 606, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 610, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 610, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 610, "usage_type": "name"}, {"api_name": "models.User", "line_number": 611, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 631, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 631, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 631, "usage_type": "name"}, {"api_name": "models.Proveedor.objects.get", "line_number": 634, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 634, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 634, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 636, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 636, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 636, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 637, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 638, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 640, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 644, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 644, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 644, "usage_type": "name"}, {"api_name": "models.Proveedor.objects.get", "line_number": 647, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 647, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 647, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 649, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 649, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 649, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 650, 
"usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 651, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 653, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 655, "usage_type": "call"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 660, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 660, "usage_type": "name"}, {"api_name": "models.User", "line_number": 661, "usage_type": "name"}, {"api_name": "models.Proveedor.objects.get", "line_number": 668, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 668, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 668, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 671, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 671, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 671, "usage_type": "name"}, {"api_name": "models.Proveedor.objects.get", "line_number": 679, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 679, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 679, "usage_type": "name"}, {"api_name": "django.contrib.sessions.models.Session.objects.all", "line_number": 681, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects", "line_number": 681, "usage_type": "attribute"}, {"api_name": "django.contrib.sessions.models.Session", "line_number": 681, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 685, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 685, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 685, "usage_type": "name"}, {"api_name": "django.contrib.sessions.models.Session.objects.all", "line_number": 686, "usage_type": "call"}, {"api_name": "django.contrib.sessions.models.Session.objects", "line_number": 686, "usage_type": "attribute"}, {"api_name": "django.contrib.sessions.models.Session", "line_number": 686, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 689, "usage_type": "call"}, {"api_name": "models.Proveedor.objects.get", "line_number": 692, "usage_type": "call"}, {"api_name": "models.Proveedor.objects", "line_number": 692, "usage_type": "attribute"}, {"api_name": "models.Proveedor", "line_number": 692, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 694, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 694, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 694, "usage_type": "name"}, {"api_name": "models.Proveedor", "line_number": 695, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 696, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 698, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 700, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 706, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 707, "usage_type": "call"}, {"api_name": "django.views.generic.edit.FormView", "line_number": 712, "usage_type": "name"}, {"api_name": "formularios.FormularioAltaAdministrador", "line_number": 713, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 719, "usage_type": "call"}, {"api_name": 
"django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 724, "usage_type": "name"}, {"api_name": "django.views.generic.TemplateView", "line_number": 724, "usage_type": "name"}, {"api_name": "models.User", "line_number": 725, "usage_type": "name"}, {"api_name": "models.User.objects.filter", "line_number": 738, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 738, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 738, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 755, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 755, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 755, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 771, "usage_type": "call"}, {"api_name": "django.contrib.auth.mixins.UserPassesTestMixin", "line_number": 775, "usage_type": "name"}, {"api_name": "braces.views.LoginRequiredMixin", "line_number": 775, "usage_type": "name"}, {"api_name": "django.views.generic.UpdateView", "line_number": 775, "usage_type": "name"}, {"api_name": "models.User", "line_number": 779, "usage_type": "name"}, {"api_name": "formularios.FormularioUpdateAdministrador", "line_number": 780, "usage_type": "name"}, {"api_name": "models.User.objects.get", "line_number": 787, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 787, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 787, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 795, "usage_type": "call"}, {"api_name": "models.User.objects.get", "line_number": 798, "usage_type": "call"}, {"api_name": "models.User.objects", "line_number": 798, "usage_type": "attribute"}, {"api_name": "models.User", "line_number": 798, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 803, "usage_type": "call"}, {"api_name": "django.views.generic.edit.FormView", "line_number": 810, "usage_type": "name"}, {"api_name": "formularios.FormContacto", "line_number": 811, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 820, "usage_type": "call"}, {"api_name": "django.shortcuts.render_to_response", "line_number": 826, "usage_type": "call"}, {"api_name": "django.template.RequestContext", "line_number": 827, "usage_type": "call"}]} +{"seq_id": "604190912", "text": "# Script for controlling the whole setup automagically\nimport ThorlabsCam as TC\nimport SLM\nimport ThorlabsMotor as TM\nimport TemperatureControllerTED4015\nimport find_particle_threshold as fpt\nimport read_dict_from_file as rdff\nfrom instrumental import u\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport threading, time, cv2, queue, copy, sys, tkinter, os, pickle\nfrom tkinter import messagebox\nfrom tkinter import filedialog as fd\nfrom functools import partial\nfrom datetime import datetime\nfrom cv2 import VideoWriter, VideoWriter_fourcc\nfrom tkinter import * # TODO Should avoid this type of import statements.\nimport PIL.Image, PIL.ImageTk\nfrom pypylon import pylon\n\ndef get_recording_path(base_path='F:/Martin/D', extension_path=\"\"):\n now = datetime.now()\n recording_path = base_path + str(now.year) \\\n + '-' + str(now.month) + '-' + str(now.day)\n recording_path = recording_path + extension_path if len(extension_path) > 0 else recording_path\n print(recording_path)\n try:\n os.mkdir(recording_path)\n except:\n print('Directory already exist')\n return recording_path\n\n\ndef 
get_default_c_p(recording_path=None):\n '''\n Dictionary containing primarily parameters used for specifying the\n experiment and synchronizing\n the program threads, such as current trap and motor serial numbers.\n '''\n # TODO: Consider changing this into a class.\n # Make this object possible to pickle and unpickle to make it easier to\n # reuse settings.\n # TODO: Change so that xm, ym are in pixels rather than the arbitrary\n # units they use now.\n if recording_path is None:\n recording_path = get_recording_path()\n c_p = {\n 'serial_nums_motors': ['27502438','27502419'],\n 'serial_no_piezo': '97100532',\n 'channel': 1,\n 'network_path': 'G:/',\n 'recording_path': recording_path,\n 'polling_rate': 100,\n 'program_running': True, # True if camera etc should keep updating\n 'motor_running': True, # Should the motor thread keep running\n 'zoomed_in': False, # Keeps track of whether the image is cropped or\n # not\n 'recording': False, # True if recording is on\n 'AOI': [0, 672, 0, 512], # Default for basler camera [0,1200,0,1000] TC\n 'new_AOI_camera': False,\n 'new_AOI_display': False,\n 'new_phasemask': False,\n 'phasemask_updated': False, # True if the phasemask is to be updated\n 'movement_threshold': 30,\n 'nbr_experiments':1,\n 'framerate': 500,\n 'tracking_on': False,\n 'setpoint_temperature': 25,\n 'current_temperature': 25,\n 'starting_temperature': 23.4,\n 'temperature_controller_connected': False,\n 'temperature_stable': False,\n 'temperature_output_on':True,\n 'need_T_stable':False,\n 'search_direction': 'up',\n 'particle_centers': [[500], [500]],\n 'target_particle_center': [500, 500], # Position of the particle we\n # currently are trying to trap. Used to minimize changes in code when\n # updating to multiparticle tracking.\n 'target_trap_pos': [500, 500],\n 'motor_movements': [0, 0], # How much x and y motor should be moved\n 'motor_starting_pos': [0, 0], # Starting position of x-y motors,\n # needed for z-compensation\n 'motor_current_pos': [0, 0, 0], # Current position of x-y motors,\n # needed for z-compensation, z is the last\n 'motors_connected':[False, False, False],\n 'connect_motor':[True, True, True],\n 'z_starting_position': 0, # Where the experiment starts in z position\n 'z_movement': 0, # Target z-movement in \"ticks\" positive for up,\n # negative for down\n 'target_experiment_z': 150, # height in ticks at which experiment should\n # be performed\n 'z_x_diff': 200, # Used for compensating drift in z when moving the\n # sample. Caused by the sample being slightly tilted. Needs to be calibrated,\n # calculated as the change needed in z (measured in steps) when the\n # motor is moved 1 mm in the positive direction: z_x_diff = (z1-z0)/(x1-x0) steps/mm.\n # Sign (+ or -) of this?\n 'z_y_diff': -400, # approximate, not measured yet\n 'x_start': 0,\n 'temperature_z_diff': 0,#-180, #-80, # How much the objective needs to be moved\n # in ticks when the objective is heated 1C. 
Needs to be calibrated manually.\n # to compensate for the changes in temperature. Measured in\n # [ticks/deg C]\n\n 'slm_x_center': 720,#700,#711, # needs to be recalibrated if camera is moved.\n # This is the position of the 0th order of the SLM (ie where the trap)\n # with xm=ym=0 is located in camera coordinates\n 'slm_y_center': 605,#594 seem to be a tiny bit off, +5?\n 'slm_to_pixel': 5000000.0, # Basler\n #4550000.0,# Thorlabs\n\n 'return_z_home': False,\n 'focus_threshold':1_000,\n 'particle_threshold': 100,\n 'particle_size_threshold': 200, # Particle detection threshold\n 'bright_particle': True, # Is particle brighter than the background?\n 'xy_movement_limit': 1200,\n 'motor_locks': [threading.Lock(), threading.Lock()],\n\n 'use_LGO':[False],\n 'LGO_order': -8,\n 'exposure_time':80, # ExposureTime in microseconds\n 'SLM_iterations':5,\n 'trap_separation_x':20e-6,\n 'trap_separation_y':20e-6,\n 'new_video':False,\n 'recording_duration':3000,\n 'experiment_schedule':[],\n 'experiment_progress':0, # number of experiments run\n 'experiment_runtime':0, # How many seconds the experiment has been running\n 'activate_traps_one_by_one':True, # If true then the program will\n # activate and fill traps one by one.\n 'camera_model':'basler',\n 'cell_width':32, # Width of cells when dividing the frame into a grid\n # for the path-search\n }\n\n # Set traps positions\n c_p['traps_absolute_pos'] = np.zeros((2,1))\n c_p['traps_relative_pos'] = np.zeros((2,1))\n\n # Position of first trap\n c_p['traps_absolute_pos'][0][0] = 678\n c_p['traps_absolute_pos'][1][0] = 465\n c_p['traps_relative_pos'][0][0] = 678\n c_p['traps_relative_pos'][1][0] = 465\n c_p['xm'], c_p['ym'] = SLM.get_xm_ym_rect(\n nbr_rows=2, nbr_columns=2,\n d0x=-50e-6, d0y=-50e-6)\n c_p['zm'] = np.zeros(len(c_p['xm']))\n\n # Cannot call SLM_loc_to_trap_loc until c_p has been created so we manually\n # convert from xm,ym to trap locs here\n tmp_x = [x * c_p['slm_to_pixel'] + c_p['slm_x_center'] for x in c_p['xm']]\n tmp_y = [y * c_p['slm_to_pixel'] + c_p['slm_y_center'] for y in c_p['ym']]\n tmp = np.asarray([tmp_x, tmp_y])\n c_p['traps_absolute_pos'] = tmp\n\n c_p['traps_occupied'] = [False for i in range(len(c_p['traps_absolute_pos'][0]))]\n c_p['phasemask'] = np.zeros((1080, 1080)) # phasemask size\n return c_p\n
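\n\ndef example_session_setup():\n # Illustrative sketch, not part of the original program: shows the\n # intended call order of the helpers in this file. The override values\n # below are hypothetical; the keys all exist in c_p.\n global thread_list, c_p\n thread_list = []\n c_p = get_default_c_p()\n c_p['setpoint_temperature'] = 30 # hypothetical session override\n c_p['recording_duration'] = 600 # hypothetical session override\n start_threads(cam=True, motor_x=True, motor_y=True, motor_z=True)\n # ... run the experiment, then call terminate_threads() on shutdown\n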
\n\ndef terminate_threads():\n '''\n Function for terminating all threads.\n\n Returns\n -------\n None.\n\n '''\n c_p['program_running'] = False\n c_p['motor_running'] = False\n c_p['tracking_on'] = False\n time.sleep(1)\n global thread_list\n for thread in thread_list:\n thread.join()\n for thread in thread_list:\n del thread\n\n\ndef start_threads(cam=True, motor_x=True, motor_y=True, motor_z=True, slm=True, tracking=True, isaac=False, temp=True):\n\n \"\"\"\n Function for starting all the threads, should only be called once!\n \"\"\"\n global thread_list\n global c_p\n\n if cam:\n camera_thread = CameraThread(1, 'Thread-camera')\n camera_thread.start()\n thread_list.append(camera_thread)\n print('Camera thread started')\n\n if motor_x:\n try:\n motor_X_thread = MotorThread(2,'Thread-motorX',0)\n motor_X_thread.start()\n thread_list.append(motor_X_thread)\n print('Motor x thread started')\n except:\n print('Could not start motor x thread')\n\n if motor_y:\n try:\n motor_Y_thread = MotorThread(3,'Thread-motorY',1)\n motor_Y_thread.start()\n thread_list.append(motor_Y_thread)\n print('Motor y thread started')\n except:\n print('Could not start motor y thread')\n\n if motor_z:\n try:\n z_thread = z_movement_thread(4, 'z-thread',serial_no=c_p['serial_no_piezo'],channel=c_p['channel'])\n z_thread.start()\n thread_list.append(z_thread)\n print('Motor z thread started')\n except:\n print('Could not start motor z thread')\n\n if slm:\n slm_thread = CreateSLMThread(5,'Thread-SLM')\n slm_thread.start()\n thread_list.append(slm_thread)\n print('SLM thread started')\n\n if tracking:\n tracking_thread = ExperimentControlThread(6,'Tracker_thread')\n tracking_thread.start()\n thread_list.append(tracking_thread)\n print('Tracking thread started')\n\n if temp:\n\n try:\n temperature_controller = TemperatureControllerTED4015.TED4015()\n except:\n temperature_controller = None\n print('Problem connecting to temperature controller')\n temperature_thread = TemperatureThread(7,'Temperature_thread',temperature_controller=temperature_controller)\n temperature_thread.start()\n thread_list.append(temperature_thread)\n print('Temperature thread started')\n\n\nclass CreateSLMThread(threading.Thread):\n def __init__(self, threadID, name):\n '''\n Thread for controlling the SLM creation. When new_phasemask is set to true\n the phasemask is updated.\n Parameters\n ----------\n threadID : int\n Thread number.\n name : string\n Name of thread.\n\n Returns\n -------\n None.\n\n '''\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.setDaemon(True)\n\n def run(self):\n '''\n Thread calculates the new phasemask when the parameter 'new_phasemask'\n is set to true. It does this using the control parameters (xm, ym) for\n particle positions. use_LGO determines whether to use LGO traps or not.\n\n '''\n global c_p\n\n c_p['xm'], c_p['ym'] = SLM.get_default_xm_ym()\n c_p['zm'] = np.zeros(len(c_p['xm']))\n Delta, N, M = SLM.get_delta(xm=c_p['xm'], ym=c_p['ym'], zm=c_p['zm'],\n use_LGO=c_p['use_LGO'],\n order=c_p['LGO_order'])\n\n c_p['phasemask'] = SLM.GSW(\n N, M, Delta, nbr_iterations=c_p['SLM_iterations'])\n\n c_p['phasemask_updated'] = True\n SLM_loc_to_trap_loc(xm=c_p['xm'], ym=c_p['ym'])\n\n\n c_p['traps_occupied'] =\\n [False for i in range(len(c_p['traps_absolute_pos'][0]))]\n\n while c_p['program_running']:\n if c_p['new_phasemask']:\n # Calculate new delta and phasemask\n # TODO check zm\n Delta, N, M = SLM.get_delta(xm=c_p['xm'], ym=c_p['ym'],\n zm=c_p['zm'],\n use_LGO=c_p['use_LGO'],\n order=c_p['LGO_order'])\n if M==2:\n print('Using normal Gerchberg-Saxton since there are 2 traps')\n c_p['phasemask'] = SLM.GS(\n N, M, Delta,\n nbr_iterations=c_p['SLM_iterations'])\n else:\n c_p['phasemask'] = SLM.GSW(\n N, M, Delta,\n nbr_iterations=c_p['SLM_iterations'])\n c_p['phasemask_updated'] = True\n c_p['new_phasemask'] = False\n\n # Update the number of traps and their position\n SLM_loc_to_trap_loc(xm=c_p['xm'], ym=c_p['ym'])\n print(c_p['traps_absolute_pos'])\n c_p['traps_occupied'] =\\n [False for i in range(len(c_p['traps_absolute_pos'][0]))]\n time.sleep(0.5)\n
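\n\ndef example_request_phasemask_update():\n # Illustrative sketch, not part of the original program: other threads\n # ask CreateSLMThread above for a new phasemask by writing the trap\n # coordinates and raising the 'new_phasemask' flag. The positions used\n # here are hypothetical.\n global c_p\n c_p['xm'], c_p['ym'] = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=-20e-6, d0y=-20e-6)\n c_p['zm'] = np.zeros(len(c_p['xm']))\n c_p['use_LGO'] = [False]\n c_p['new_phasemask'] = True # picked up by CreateSLMThread.run\n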
\n\nclass TemperatureThread(threading.Thread):\n '''\n Class for running the temperature controller in the background\n '''\n def __init__(self, threadID, name, temperature_controller=None, max_diff=0.05):\n '''\n Parameters\n ----------\n threadID : int\n Thread id number.\n name : String\n Name of thread.\n temperature_controller : temperaturecontroller, optional\n Controller of objective temperature. The default is None.\n max_diff : Float, optional\n Maximum value by which temperature is allowed to deviate from\n target temperature for temperature to be considered as stable.\n The default is 0.05.\n\n Returns\n -------\n None.\n\n '''\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.temperature_history = []\n self.temp_hist_length = 100\n self.max_diff = max_diff\n if temperature_controller is not None:\n self.temperature_controller = temperature_controller\n c_p['starting_temperature'] =\\n self.temperature_controller.measure_temperature()\n c_p['current_temperature'] =\\n c_p['starting_temperature']\n c_p['setpoint_temperature'] = c_p['starting_temperature']\n c_p['temperature_controller_connected'] = True\n else:\n try:\n self.temperature_controller =\\n TemperatureControllerTED4015.TED4015_controller()\n c_p['starting_temperature'] =\\n self.temperature_controller.measure_temperature()\n c_p['current_temperature'] =\\n c_p['starting_temperature']\n c_p['setpoint_temperature'] = c_p['starting_temperature']\n c_p['temperature_controller_connected'] = True\n except:\n # Handling the case of not having a temperature controller\n print('\\nWARNING, COULD NOT ESTABLISH CONTACT WITH \\n TEMPERATURE CONTROLLER!\\n')\n self.temperature_controller = None\n self.setDaemon(True)\n\n def run(self):\n global c_p\n if self.temperature_controller is not None:\n # Turn on output and continuously set and query the temperature.\n if c_p['temperature_output_on']:\n self.temperature_controller.turn_on_output()\n while c_p['program_running']:\n if 0 < c_p['setpoint_temperature'] < 40:\n self.temperature_controller.set_setpoint_temperature(c_p['setpoint_temperature'])\n else:\n print('Setpoint temperature NOK')\n c_p['current_temperature'] =\\n self.temperature_controller.measure_temperature()\n self.temperature_history.append(\n c_p['current_temperature'])\n\n # Update and check history\n if len(self.temperature_history)>self.temp_hist_length:\n self.temperature_history.pop(0) # drop the oldest sample\n history = [T-c_p['setpoint_temperature'] for T in self.temperature_history]\n if max(np.abs(history)) < self.max_diff:\n c_p['temperature_stable'] = True\n else:\n c_p['temperature_stable'] = False\n time.sleep(1)\n
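\n\ndef example_stability_check(history, setpoint, max_diff=0.05):\n # Illustrative sketch, not part of the original program, of the\n # stability rule used by TemperatureThread above: the temperature\n # counts as stable only when no recent sample deviates from the\n # setpoint by more than max_diff degrees.\n deviations = [T - setpoint for T in history]\n return max(np.abs(deviations)) < max_diff\n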
\n\nclass UserInterface:\n\n def read_experiment_dictionary(self):\n '''\n Reads an experiment schedule, a list of dictionaries with c_p\n overrides, from a file selected by the user.\n '''\n global c_p\n filepath = fd.askopenfilename()\n # Name of the reader helper in read_dict_from_file is assumed here.\n experiment_list = rdff.ReadFileToExperimentList(filepath)\n if experiment_list is not None and len(experiment_list) > 0:\n c_p['experiment_schedule'] = experiment_list\n print('Starting the following experiment. \n', experiment_list)\n # Reset experiment progress\n if c_p['tracking_on']:\n c_p['tracking_on'] = False\n time.sleep(0.5)\n c_p['experiment_progress'] = 0\n time.sleep(0.2)\n c_p['tracking_on'] = True\n else:\n c_p['experiment_progress'] = 0\n c_p['nbr_experiments'] = len(c_p['experiment_schedule'])\n # Update recording path\n name = filepath[filepath.rfind('/')+1:filepath.rfind('.')]\n c_p['recording_path'] = get_recording_path(extension_path='_'+name)\n else:\n print('Invalid or empty file.')\n\n def create_trap_image(self):\n global c_p\n trap_x = c_p['traps_absolute_pos'][0]\n trap_y = c_p['traps_absolute_pos'][1]\n particle_x = c_p['particle_centers'][0]\n particle_y = c_p['particle_centers'][1]\n AOI = c_p['AOI']\n # Define new mini-image\n mini_image = np.zeros((200,240,3))\n scale_factor = 5\n\n l = int(round(AOI[2]/scale_factor)) # left\n r = int(round(AOI[3]/scale_factor)) # right\n u = int(round(AOI[0]/scale_factor)) # up\n d = int(round(AOI[1]/scale_factor)) # down\n\n # Draw the traps\n if len(trap_x) > 0 and len(trap_x) == len(trap_y):\n for x, y in zip(trap_x, trap_y):\n # Round down and recalculate\n x = int(round(x/scale_factor))\n y = int(round(y/scale_factor))\n\n if 1 <= x <= 239 and 1 <= y <= 199:\n mini_image[(y-1):(y+2),(x-1):(x+2),0] = 255\n\n # Draw the particles\n if len(particle_x) > 0 and len(particle_x) == len(particle_y):\n for x, y in zip(particle_x, particle_y):\n # Round down and recalculate\n x = int(round(x/scale_factor + u))\n y = int(round(y/scale_factor + l))\n if 1 <= x <= 239 and 1 <= y <= 199:\n mini_image[y-1:y+2,x-1:x+2,2] = 255\n\n # Draw the AOI\n try:\n mini_image[l,u:d,1:2] = 255 # Left edge\n mini_image[l:r,u,1:2] = 255 # Upper edge\n mini_image[r,u:d,1:2] = 255 # Right edge\n mini_image[l:r,d,1:2] = 255 # Bottom edge\n except:\n mini_image[0,0:-1,1:2] = 255 # Left edge\n mini_image[0:-1,0,1:2] = 255 # Upper edge\n mini_image[-1,0:-1,1:2] = 255 # Right edge\n mini_image[0:-1,-1,1:2] = 255 # Bottom edge\n\n self.mini_image = mini_image.astype('uint8')\n\n def create_buttons(self,top=None):\n if top is None:\n top = self.window\n\n def get_y_separation(start=50, distance=40):\n # Simple generator to avoid printing all the y-positions of the\n # buttons\n index = 0\n while True:\n yield start + (distance * index)\n index += 1\n\n global c_p\n\n # TODO add home z button\n # TODO: Check if we can change colors of buttons by making buttons part of\n # object.\n\n up_button = tkinter.Button(top, text='Move up',\n command=partial(move_button, 0))\n down_button = tkinter.Button(top, text='Move down',\n command=partial(move_button, 1))\n right_button = tkinter.Button(top, text='Move right',\n command=partial(move_button, 2))\n left_button = tkinter.Button(top, text='Move left',\n command=partial(move_button, 3))\n start_record_button = tkinter.Button(top, text='Start recording',\n command=start_record)\n stop_record_button = tkinter.Button(top, text='Stop recording',\n command=stop_record)\n toggle_bright_particle_button = tkinter.Button(\n top, text='Toggle particle brightness',\n command=toggle_bright_particle)\n\n threshold_entry = tkinter.Entry(top, bd=5)\n temperature_entry = tkinter.Entry(top, bd=5)\n exposure_entry = tkinter.Entry(top, bd=5)\n\n toggle_tracking_button = tkinter.Button(\n top, text='Toggle particle tracking', command=toggle_tracking)\n\n def set_threshold():\n entry = threshold_entry.get()\n try:\n threshold = int(entry)\n if 0 < threshold < 255:\n c_p['particle_threshold'] = threshold\n print(\"Threshold set to \", 
threshold)\n else:\n print('Threshold out of bounds')\n except:\n print('Cannot convert entry to integer')\n threshold_entry.delete(0, last=5000)\n\n def set_temperature():\n entry = temperature_entry.get()\n try:\n temperature = float(entry)\n if 20 < temperature < 40:\n c_p['setpoint_temperature'] = temperature\n print(\"Temperature set to \", temperature)\n else:\n print('Temperature out of bounds, it is no good to cook or \\n freeze your samples')\n except:\n print('Cannot convert entry to a float')\n temperature_entry.delete(0, last=5000)\n\n def set_exposure():\n if c_p['camera_model'] == 'basler':\n entry = exposure_entry.get()\n try:\n exposure_time = int(entry)\n if 59 < exposure_time < 4e5: # If you need more than that you are probably doing something wrong\n c_p['exposure_time'] = exposure_time\n print(\"Exposure time set to \", exposure_time)\n c_p['new_AOI_camera'] = True\n else:\n print('Exposure time out of bounds!')\n except:\n print('Cannot convert entry to an integer')\n exposure_entry.delete(0, last=5000)\n\n def connect_disconnect_motorX():\n # TODO: Behaviour of this might be odd if the motor did not get connected.\n c_p['connect_motor'][0] = not c_p['connect_motor'][0]\n\n def connect_disconnect_motorY():\n c_p['connect_motor'][1] = not c_p['connect_motor'][1]\n\n def connect_disconnect_piezo():\n c_p['connect_motor'][2] = not c_p['connect_motor'][2]\n\n threshold_button = tkinter.Button(\n top, text='Set threshold', command=set_threshold)\n focus_up_button = tkinter.Button(\n top, text='Move focus up', command=focus_up)\n focus_down_button = tkinter.Button(\n top, text='Move focus down', command=focus_down)\n temperature_button = tkinter.Button(\n top, text='Set setpoint temperature', command=set_temperature)\n zoom_in_button = tkinter.Button(top, text='Zoom in', command=zoom_in)\n zoom_out_button = tkinter.Button(top, text='Zoom out', command=zoom_out)\n temperature_output_button = tkinter.Button(top,\n text='Toggle temperature output', command=toggle_temperature_output)\n set_exposure_button = tkinter.Button(top, text='Set exposure (basler)', command=set_exposure)\n experiment_schedule_button = tkinter.Button(top,\n text='Select experiment schedule',\n command=self.read_experiment_dictionary\n )\n # Motor buttons. 
Attributes of the UserInterface class so we can easily change\n # their description text.\n self.toggle_motorX_button = tkinter.Button(\n top, text='Connect motor x', command=connect_disconnect_motorX)\n self.toggle_motorY_button = tkinter.Button(\n top, text='Connect motor y', command=connect_disconnect_motorY)\n self.toggle_piezo_button = tkinter.Button(\n top, text='Connect piezo motor', command=connect_disconnect_piezo)\n\n x_position = 1220\n x_position_2 = 1420\n y_position = get_y_separation()\n y_position_2 = get_y_separation()\n\n # Place all the buttons, starting with first column\n up_button.place(x=x_position, y=y_position.__next__())\n down_button.place(x=x_position, y=y_position.__next__())\n right_button.place(x=x_position, y=y_position.__next__())\n left_button.place(x=x_position, y=y_position.__next__())\n start_record_button.place(x=x_position, y=y_position.__next__())\n stop_record_button.place(x=x_position, y=y_position.__next__())\n toggle_bright_particle_button.place(x=x_position, y=y_position.__next__())\n threshold_entry.place(x=x_position, y=y_position.__next__())\n threshold_button.place(x=x_position, y=y_position.__next__())\n toggle_tracking_button.place(x=x_position, y=y_position.__next__())\n focus_up_button.place(x=x_position, y=y_position.__next__())\n focus_down_button.place(x=x_position, y=y_position.__next__())\n temperature_entry.place(x=x_position, y=y_position.__next__())\n temperature_button.place(x=x_position, y=y_position.__next__())\n zoom_in_button.place(x=x_position, y=y_position.__next__())\n zoom_out_button.place(x=x_position, y=y_position.__next__())\n\n # Second column\n temperature_output_button.place(x=x_position_2, y=y_position_2.__next__())\n exposure_entry.place(x=x_position_2, y=y_position_2.__next__())\n set_exposure_button.place(x=x_position_2, y=y_position_2.__next__())\n experiment_schedule_button.place(x=x_position_2, y=y_position_2.__next__())\n self.toggle_motorX_button.place(x=x_position_2, y=y_position_2.__next__())\n self.toggle_motorY_button.place(x=x_position_2, y=y_position_2.__next__())\n self.toggle_piezo_button.place(x=x_position_2, y=y_position_2.__next__())\n\n def create_SLM_window(self, _class):\n try:\n if self.new.state() == \"normal\":\n self.new.focus()\n except:\n self.new = tkinter.Toplevel(self.window)\n self.SLM_Window = _class(self.new)\n\n def snapshot(self):\n global image\n global c_p\n cv2.imwrite(c_p['recording_path'] + \"/frame-\" +\\n time.strftime(\"%d-%m-%Y-%H-%M-%S\") +\\n \".jpg\", cv2.cvtColor(image, cv2.COLOR_RGB2BGR))\n\n def get_temperature_info(self):\n global c_p\n if c_p['temperature_controller_connected']:\n temperature_info = 'Current objective temperature is: '+str(c_p['current_temperature'])+' C'+'\n setpoint temperature is: '+str(c_p['setpoint_temperature'])+' C'\n if c_p['temperature_stable']:\n temperature_info += '\nTemperature is stable. '\n else:\n temperature_info += '\nTemperature is not stable. 
'\n if c_p['temperature_output_on']:\n temperature_info += '\n Temperature controller output is on.'\n else:\n temperature_info += '\n Temperature controller output is off.'\n else:\n temperature_info = 'Temperature controller is not connected.'\n\n\n return temperature_info\n\n def get_position_info(self):\n\n global c_p\n # Add position info\n position_text = 'x: '+str(c_p['motor_current_pos'][0])+\\n 'mm y: '+str(c_p['motor_current_pos'][1])+\\n 'mm z: '+str(c_p['motor_current_pos'][2])\n position_text += '\n Experiments run ' + str(c_p['experiment_progress'])\n position_text += ' out of ' + str(c_p['nbr_experiments'])\n position_text += ' ' + str(c_p['experiment_runtime']) + 's run out of ' + str(c_p['recording_duration'])\n position_text += '\n Current search direction is: ' + str(c_p['search_direction'] + '\n')\n\n # Add motor connection info\n x_connected = 'connected. ' if c_p['motors_connected'][0] else 'disconnected.'\n y_connected = 'connected. ' if c_p['motors_connected'][1] else 'disconnected.'\n piezo_connected = 'connected. ' if c_p['motors_connected'][2] else 'disconnected.'\n\n position_text += 'Motor-X is ' + x_connected\n position_text += ' Motor-Y is ' + y_connected + '\n'\n position_text += ' Focus (piezo) motor is ' + piezo_connected + '\n'\n\n return position_text\n\n def update_motor_buttons(self):\n # Motor connection buttons\n x_connect = 'Disconnect' if c_p['connect_motor'][0] else 'Connect'\n self.toggle_motorX_button.config(text=x_connect + ' motor x')\n y_connect = 'Disconnect' if c_p['connect_motor'][1] else 'Connect'\n self.toggle_motorY_button.config(text=y_connect + ' motor y')\n piezo_connected = 'Disconnect' if c_p['connect_motor'][2] else 'Connect'\n self.toggle_piezo_button.config(text=piezo_connected + ' piezo motor')\n\n def create_indicators(self):\n global c_p\n # Update if recording is turned on or not\n if c_p['recording']:\n self.recording_label = Label(\n self.window, text='recording is on', bg='green')\n else:\n self.recording_label = Label(\n self.window, text='recording is off', bg='red')\n self.recording_label.place(x=1220, y=750)\n\n if c_p['tracking_on']:\n self.tracking_label = Label(\n self.window, text='particle tracking is on', bg='green')\n else:\n self.tracking_label = Label(\n self.window, text='particle tracking is off', bg='red')\n self.tracking_label.place(x=1220, y=780)\n\n self.position_label = Label(self.window, text=self.get_position_info())\n self.position_label.place(x=1420, y=400)\n self.temperature_label = Label(self.window, text=self.get_temperature_info())\n self.temperature_label.place(x=1420, y=540)\n\n def update_indicators(self):\n '''\n Helper function for updating on-screen indicators\n '''\n # TODO: Try to incorporate some of the labels into the buttons.\n global c_p\n # Update if recording is turned on or not\n if c_p['recording']:\n self.recording_label.config(text='recording is on', bg='green')\n else:\n self.recording_label.config(text='recording is off', bg='red')\n\n if c_p['tracking_on']:\n self.tracking_label.config(text='particle tracking is on',bg='green')\n else:\n self.tracking_label.config(text='particle tracking is off', bg='red')\n\n self.temperature_label.config(text=self.get_temperature_info())\n\n self.position_label.config(text=self.get_position_info())\n\n self.update_motor_buttons()\n\n def resize_display_image(self, img):\n img_size = np.shape(img)\n if img_size[1]==self.canvas_width or img_size[0] == self.canvas_height:\n return img\n\n if img_size[1]/self.canvas_width > 
img_size[0]/self.canvas_height:\n dim = (int(self.canvas_width/img_size[1]*img_size[0]), int(self.canvas_width))\n else:\n dim = ( int(self.canvas_height), int(self.canvas_height/img_size[0]*img_size[1]))\n return cv2.resize(img, (dim[1],dim[0]), interpolation = cv2.INTER_AREA)\n\n def update(self):\n # Get a frame from the video source\n global image\n if c_p['phasemask_updated']:\n print('New phasemask')\n self.SLM_Window.update()\n c_p['phasemask_updated'] = False\n self.update_indicators()\n self.photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(self.resize_display_image(image)))\n self.canvas.create_image(0, 0, image = self.photo, anchor = tkinter.NW) # need to use a compatible image type\n\n # Update mini-window\n self.create_trap_image()\n self.mini_photo = PIL.ImageTk.PhotoImage(image = PIL.Image.fromarray(self.mini_image, mode='RGB'))\n self.mini_canvas.create_image(0, 0, image = self.mini_photo, anchor = tkinter.NW) # need to use a compatible image type\n\n self.window.after(self.delay, self.update)\n\n\nclass SLM_window(Frame):\n global c_p\n\n def __init__(self, master=None):\n Frame.__init__(self, master)\n self.master = master\n self.master.geometry(\"1920x1080+1920+0\")\n self.pack(fill=BOTH, expand=1)\n\n render = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(c_p['phasemask']))\n self.img = Label(self, image=render)\n self.img.place(x=420, y=0)\n self.img.image = render # keep a reference so the image is not garbage collected\n self.delay = 500\n self.update()\n\n def update(self):\n # This implementation does work but is perhaps a tiny bit janky\n self.photo = PIL.ImageTk.PhotoImage(image=PIL.Image.fromarray(c_p['phasemask']))\n del self.img.image\n self.img = Label(self, image=self.photo)\n self.img.image = self.photo # This ate lots of memory\n self.img.place(x=420, y=0) # Do not think this is needed\n
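\n\ndef example_experiment_schedule():\n # Illustrative sketch, not part of the original program: an experiment\n # schedule, as loaded by UserInterface.read_experiment_dictionary above,\n # is a list of dictionaries whose keys are c_p entries to override for\n # each run (see ExperimentControlThread.run below). The values here are\n # hypothetical.\n return [{'setpoint_temperature': 25, 'use_LGO': [False], 'recording_duration': 600}, {'setpoint_temperature': 30, 'use_LGO': [True], 'LGO_order': -8, 'recording_duration': 600}]\n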
\n\nclass MotorThread(threading.Thread):\n '''\n Thread in which a motor is controlled. The motor object is available globally.\n '''\n # TODO: Try removing the thread locks on the motors.\n # Try replacing some of the c_p with events.\n def __init__(self, threadID, name, axis):\n\n threading.Thread.__init__(self)\n global c_p\n self.threadID = threadID\n self.name = name\n self.axis = axis # 0 = x-axis, 1 = y axis\n\n # Initiate contact with motor\n if self.axis == 0 or self.axis == 1:\n self.motor = TM.InitiateMotor(c_p['serial_nums_motors'][self.axis],\n pollingRate=c_p['polling_rate'])\n else:\n raise Exception(\"Invalid choice of axis, no motor available.\")\n\n # Read motor starting position\n if self.motor is not None:\n c_p['motor_starting_pos'][self.axis] = float(str(self.motor.Position))\n print('Motor is at ', c_p['motor_starting_pos'][self.axis])\n c_p['motors_connected'][self.axis] = True\n else:\n c_p['motors_connected'][self.axis] = False\n self.setDaemon(True)\n\n def run(self):\n print('Running motor thread')\n global c_p\n while c_p['motor_running']:\n # If motor connected and it should be connected, check for next move\n if c_p['motors_connected'][self.axis] and \\n c_p['connect_motor'][self.axis]:\n # Acquire lock to ensure that it is safe to move the motor\n with c_p['motor_locks'][self.axis]:\n if np.abs(c_p['motor_movements'][self.axis])>0:\n # The movement limit must be positive\n c_p['xy_movement_limit'] = np.abs(c_p['xy_movement_limit'])\n # Check how much the motor is allowed to move\n\n if np.abs(c_p['motor_movements'][self.axis])<=c_p['xy_movement_limit']:\n TM.MoveMotorPixels(self.motor, c_p['motor_movements'][self.axis])\n else:\n if c_p['motor_movements'][self.axis]>0:\n TM.MoveMotorPixels(self.motor, c_p['xy_movement_limit'])\n else:\n TM.MoveMotorPixels(self.motor, -c_p['xy_movement_limit'])\n c_p['motor_movements'][self.axis] = 0\n c_p['motor_current_pos'][self.axis] = float(str(self.motor.Position))\n # Motor is connected but should be disconnected\n elif c_p['motors_connected'][self.axis] and not c_p['connect_motor'][self.axis]:\n TM.DisconnectMotor(self.motor)\n c_p['motors_connected'][self.axis] = False\n self.motor = None\n # Motor is not connected but should be\n elif not c_p['motors_connected'][self.axis] and c_p['connect_motor'][self.axis]:\n self.motor = TM.InitiateMotor(c_p['serial_nums_motors'][self.axis],\n pollingRate=c_p['polling_rate'])\n # Check if motor was successfully connected.\n if self.motor is not None:\n c_p['motors_connected'][self.axis] = True\n else:\n motor_ = 'x' if self.axis == 0 else 'y'\n print('Failed to connect motor '+motor_)\n time.sleep(0.1) # To give other threads some time to work\n if c_p['motors_connected'][self.axis]:\n TM.DisconnectMotor(self.motor)\n\n\ndef compensate_focus():\n '''\n Function for compensating the change in focus caused by x-y movement.\n Returns the position in ticks which z should take to compensate for the focus\n '''\n global c_p\n new_z_pos = (c_p['z_starting_position']\n +c_p['z_x_diff']*(c_p['motor_starting_pos'][0] - c_p['motor_current_pos'][0])\n +c_p['z_y_diff']*(c_p['motor_starting_pos'][1] - c_p['motor_current_pos'][1]) )\n new_z_pos += c_p['temperature_z_diff']*(c_p['current_temperature']-c_p['starting_temperature'])\n return int(new_z_pos)\n
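\n\ndef example_focus_compensation():\n # Illustrative numeric sketch, not part of the original program, of the\n # linear model in compensate_focus() above:\n # z = z0 + z_x_diff*(x0 - x) + z_y_diff*(y0 - y) + temperature_z_diff*(T - T0).\n # All values below are hypothetical.\n z0, z_x_diff, z_y_diff, temp_z_diff = 1000, 200, -400, 0\n x0, x = 0.0, 0.5 # x motor moved +0.5 mm\n y0, y = 0.0, 0.1 # y motor moved +0.1 mm\n T0, T = 25.0, 27.0 # objective heated by 2 C\n return int(z0 + z_x_diff*(x0 - x) + z_y_diff*(y0 - y) + temp_z_diff*(T - T0)) # 940\n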
\n\nclass z_movement_thread(threading.Thread):\n '''\n Thread for controlling movement of the objective in z-direction.\n Will also help with automagically adjusting the focus to the sample.\n '''\n def __init__(self, threadID, name, serial_no, channel, polling_rate=250):\n threading.Thread.__init__(self)\n global c_p\n self.threadID = threadID\n self.name = name\n self.piezo = TM.PiezoMotor(serial_no, channel=channel, pollingRate=polling_rate)\n if self.piezo.is_connected:\n c_p['z_starting_position'] = self.piezo.get_position()\n c_p['motor_current_pos'][2] = self.piezo.get_position()\n c_p['motors_connected'][2] = self.piezo.is_connected\n self.setDaemon(True)\n\n def run(self):\n global c_p\n lifting_distance = 0\n while c_p['program_running']:\n c_p['motors_connected'][2] = self.piezo.is_connected\n\n # Check if piezo connected and should be connected\n if self.piezo.is_connected and c_p['connect_motor'][2]:\n\n # Check if the objective should be moved\n self.piezo.move_to_position(compensate_focus()+lifting_distance)\n\n if c_p['z_movement'] != 0:\n c_p['z_movement'] = int(c_p['z_movement'])\n # Move up if we are not already up\n if self.piezo.move_relative(c_p['z_movement']):\n lifting_distance += c_p['z_movement']\n c_p['z_movement'] = 0\n\n elif c_p['return_z_home'] and c_p['motor_current_pos'][2]>compensate_focus():\n lifting_distance -= min(40,c_p['motor_current_pos'][2]-compensate_focus())\n # Compensating for hysteresis effect in movement\n print('homing z')\n if c_p['motor_current_pos'][2]<=compensate_focus() or c_p['z_movement'] != 0:\n c_p['return_z_home'] = False\n if self.piezo.is_connected:\n c_p['motor_current_pos'][2] = self.piezo.get_position()\n\n # Piezomotor not connected but should be\n elif not self.piezo.is_connected and c_p['connect_motor'][2]:\n self.piezo.connect_piezo_motor()\n\n # Piezo motor connected but should not be\n elif self.piezo.is_connected and not c_p['connect_motor'][2]:\n self.piezo.disconnect_piezo()\n\n time.sleep(0.3)\n del(self.piezo)\n\n\nclass CameraThread(threading.Thread):\n\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n # Initialize camera and global image\n global image\n if c_p['camera_model'] == 'ThorlabsCam':\n # Get a thorlabs camera\n self.cam = TC.get_camera()\n self.cam.set_defaults(left=c_p['AOI'][0], right=c_p['AOI'][1], top=c_p['AOI'][2], bot=c_p['AOI'][3], n_frames=1)\n exposure_time = TC.find_exposure_time(self.cam, targetIntensity=70) # automagically finds a decent exposure time\n print('Exposure time = ', exposure_time)\n else:\n # Get a basler camera\n tlf = pylon.TlFactory.GetInstance()\n self.cam = pylon.InstantCamera(tlf.CreateFirstDevice())\n self.cam.Open()\n image = np.zeros((672,512,1))\n self.setDaemon(True)\n\n def __del__(self):\n if c_p['camera_model'] == 'basler':\n self.cam.Close()\n else:\n self.cam.close()\n\n def get_important_parameters(self):\n global c_p\n parameter_dict = {\n 'xm':c_p['xm'],\n 'ym':c_p['ym'],\n 'zm':c_p['zm'],\n 'use_LGO':c_p['use_LGO'],\n 'LGO_order':c_p['LGO_order'],\n 'setpoint_temperature':c_p['setpoint_temperature'],\n 'target_experiment_z':c_p['target_experiment_z'],\n 'temperature_output_on':c_p['temperature_output_on'],\n 'exposure_time':c_p['exposure_time'],\n 'starting_temperature':c_p['current_temperature'],\n }\n return parameter_dict\n\n def create_video_writer(self):\n '''\n Function for creating a VideoWriter.\n Will also save the relevant parameters of the experiments.\n '''\n global c_p\n now = datetime.now()\n fourcc = VideoWriter_fourcc(*'MJPG')\n image_width = c_p['AOI'][1]-c_p['AOI'][0]\n image_height = c_p['AOI'][3]-c_p['AOI'][2]\n video_name = c_p['recording_path'] + '/video-' + \\n str(now.hour) + '-' + str(now.minute) + '-' + str(now.second)+'.avi'\n experiment_info_name 
=c_p['recording_path'] + '/video-' + \\n str(now.hour) + '-' + str(now.minute) + '-' + str(now.second) + '_info'\n\n video = VideoWriter(video_name, fourcc,\n float(c_p['framerate']),\n (image_width, image_height), isColor=False)\n exp_info_params = self.get_important_parameters()\n return video, experiment_info_name, exp_info_params\n\n def thorlabs_capture(self):\n number_images_saved = 0 # counts\n video_created = False\n global c_p\n\n while c_p['program_running']:\n # Set defaults for camera, acknowledge that this has been done\n self.cam.set_defaults(left=c_p['AOI'][0],\n right=c_p['AOI'][1],\n top=c_p['AOI'][2],\n bot=c_p['AOI'][3])\n c_p['new_AOI_camera'] = False\n # Grab one example image\n global image\n image = self.cam.grab_image(n_frames=1)\n image_count = 0\n # Start livefeed from the camera\n\n # Setting maximum framerate. Will cap it to make it stable\n self.cam.start_live_video(\n framerate=str(c_p['framerate']) + 'hertz' )\n\n start = time.time()\n\n # Create an array to store the images which have been captured in\n if not video_created:\n video, experiment_info_name, exp_info_params = self.create_video_writer()\n video_created = True\n # Start continuously capturing images now that the camera parameters have been set\n while c_p['program_running']\\n and not c_p['new_AOI_camera']:\n self.cam.wait_for_frame(timeout=None)\n if c_p['recording']:\n video.write(image)\n # Capture an image and update the image count\n image_count = image_count+1\n image[:][:][:] = self.cam.latest_frame()\n\n\n video.release()\n\n del video\n video_created = False\n # Close the livefeed and calculate the fps of the captures\n end = time.time()\n self.cam.stop_live_video()\n fps = image_count/(end-start)\n print('Capture sequence finished', image_count,\n 'Images captured in ', end-start, 'seconds. \n FPS is ',\n fps)\n # Save the experiment data in a pickled dict.\n outfile = open(experiment_info_name, 'wb')\n exp_info_params['fps'] = fps\n pickle.dump(exp_info_params, outfile)\n outfile.close()\n\n def set_basler_AOI(self):\n '''\n Function for setting AOI of basler camera to c_p['AOI']\n '''\n global c_p\n\n try:\n # The order in which you set the size and offset parameters matters.\n # If you ever get the offset + width greater than max width the\n # camera won't accept your values. Hence the if-else statements\n # below. 
Conditions might need to be changed if the use case of this\n # function changes\n c_p['AOI'][1] -= np.mod(c_p['AOI'][1]-c_p['AOI'][0],16)\n c_p['AOI'][3] -= np.mod(c_p['AOI'][3]-c_p['AOI'][2],16)\n\n width = int(c_p['AOI'][1] - c_p['AOI'][0])\n offset_x = 672 - width - c_p['AOI'][0]\n height = int(c_p['AOI'][3] - c_p['AOI'][2])\n offset_y = 512 - height - c_p['AOI'][2]\n\n self.cam.OffsetX = 0\n self.cam.Width = width\n self.cam.OffsetX = offset_x\n self.cam.OffsetY = 0\n self.cam.Height = height\n self.cam.OffsetY = offset_y\n except Exception as e:\n print('AOI not accepted',c_p['AOI'])\n print(e)\n\n def basler_capture(self):\n number_images_saved = 0 # counts\n video_created = False\n global c_p\n img = pylon.PylonImage()\n\n while c_p['program_running']:\n # Set defaults for camera, acknowledge that this has been done\n\n self.set_basler_AOI()\n c_p['new_AOI_camera'] = False\n try:\n self.cam.ExposureTime = c_p['exposure_time']\n c_p['framerate'] = self.cam.ResultingFrameRate.GetValue()\n print('Read framerate to ', c_p['framerate'], ' fps.')\n except:\n print('Exposure time not accepted by camera')\n # Grab one example image\n image_count = 0\n\n global image\n self.cam.StartGrabbing()\n\n start = time.time()\n\n # Create an array to store the images which have been captured in\n if not video_created:\n video, experiment_info_name, exp_info_params = self.create_video_writer()\n video_created = True\n # Start continuously capturing images now that the camera parameters have been set\n while c_p['program_running']\\n and not c_p['new_AOI_camera']:\n\n with self.cam.RetrieveResult(2000) as result:\n img.AttachGrabResultBuffer(result)\n image = np.flip(img.GetArray(),axis=(0,1)) # Testing to flip this guy\n img.Release()\n if c_p['recording']:\n video.write(image)\n # Capture an image and update the image count\n image_count = image_count+1\n\n video.release()\n self.cam.StopGrabbing()\n del video\n video_created = False\n # Close the livefeed and calculate the fps of the captures\n end = time.time()\n\n # Calculate FPS\n fps = image_count/(end-start)\n print('Capture sequence finished', image_count,\n 'Images captured in ', end-start, 'seconds. \n FPS is ',\n fps)\n\n # Save the experiment data in a pickled dict.\n outfile = open(experiment_info_name, 'wb')\n exp_info_params['fps'] = fps\n pickle.dump(exp_info_params, outfile)\n outfile.close()\n\n def run(self):\n if c_p['camera_model'] == 'ThorlabsCam':\n self.thorlabs_capture()\n elif c_p['camera_model'] == 'basler':\n self.basler_capture()\n
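\n\ndef example_read_experiment_info(info_path):\n # Illustrative sketch, not part of the original program: each capture\n # loop above pickles a parameter dict next to the video; info_path is\n # the experiment_info_name returned by create_video_writer().\n with open(info_path, 'rb') as infile:\n exp_info = pickle.load(infile)\n print('Recorded at', exp_info['fps'], 'fps with traps at', exp_info['xm'], exp_info['ym'])\n return exp_info\n\n\ndef example_aligned_width(x0, x1):\n # Illustrative sketch, not part of the original program, of the\n # alignment rule in set_basler_AOI() above: the requested width is\n # shrunk to a multiple of 16 pixels before being sent to the camera.\n return int((x1 - x0) - np.mod(x1 - x0, 16))\n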
\n\nclass ExperimentControlThread(threading.Thread):\n '''\n Thread which does the tracking.\n '''\n def __init__(self, threadID, name):\n threading.Thread.__init__(self)\n self.threadID = threadID\n self.name = name\n self.setDaemon(True)\n\n def __del__(self):\n c_p['tracking_on'] = False\n\n def catch_particle(self, min_index_trap=None, min_index_particle=None):\n '''\n Function for determining where and how to move when min_index_particle\n has been found\n '''\n global c_p\n if min_index_particle is not None:\n c_p['target_trap_pos'] = [c_p['traps_relative_pos'][0][min_index_trap],\n c_p['traps_relative_pos'][1][min_index_trap]]\n c_p['target_particle_center'] = [c_p['particle_centers'][0][min_index_particle],\n c_p['particle_centers'][1][min_index_particle]]\n\n if True in c_p['traps_occupied']:\n c_p['xy_movement_limit'] = 40\n # Some traps are occupied. Want to avoid catching more than one\n # particle per trap.\n filled_traps_locs = []\n for idx, occupied in enumerate(c_p['traps_occupied']):\n if occupied:\n filled_traps_locs.append([c_p['traps_relative_pos'][0][idx],\n c_p['traps_relative_pos'][1][idx] ])\n x, y, success = path_search(filled_traps_locs,\n target_particle_location=c_p['target_particle_center'],\n target_trap_location=c_p['target_trap_pos'])\n else:\n success = False\n c_p['xy_movement_limit'] = 1200\n if success:\n c_p['motor_movements'][0] = -x\n c_p['motor_movements'][1] = y\n else:\n c_p['motor_movements'][0] = -(c_p['target_trap_pos'][0] - c_p['target_particle_center'][0]) # Note: Sign of this depends on setup\n c_p['motor_movements'][1] = c_p['target_trap_pos'][1] - c_p['target_particle_center'][1]\n\n else:\n c_p['target_particle_center'] = []\n\n def lift_for_experiment(self,patience=3):\n '''\n Assumes that all particles have been caught.\n patience, how long (s) we allow a trap to be unoccupied for\n\n Returns true if lift succeeded\n '''\n z_starting_pos = compensate_focus()\n patience_counter = 0\n print('Lifting time. Starting from ', z_starting_pos)\n while c_p['target_experiment_z'] > c_p['motor_current_pos'][2] - z_starting_pos:\n time.sleep(0.2)\n all_filled, nbr_particles, min_index_trap, min_index_particle =\\n self.check_exp_conditions()\n if all_filled:\n c_p['z_movement'] = 40\n c_p['return_z_home'] = False\n patience_counter = 0\n else:\n patience_counter += 1\n if patience_counter >= patience or not c_p['tracking_on']:\n c_p['return_z_home'] = True\n c_p['z_movement'] = 0\n return False\n print('Lifting done. Now at', c_p['motor_current_pos'][2])\n return True\n\n def check_exp_conditions(self, tracking_func=None):\n '''\n Checks if all traps are occupied. Returns true if this is the case.\n Tries to catch the closest unoccupied particle.\n '''\n if tracking_func is None:\n x, y = fpt.find_particle_centers(copy.copy(image),\n threshold=c_p['particle_threshold'],\n particle_size_threshold=c_p['particle_size_threshold'],\n bright_particle=c_p['bright_particle'])\n else:\n x, y = tracking_func(copy.copy(image))\n\n c_p['particle_centers'] = [x, y]\n c_p['traps_occupied'] = [False for i in range(len(c_p['traps_absolute_pos'][0]))]\n min_index_trap, min_index_particle = find_closest_unoccupied()\n\n if False not in c_p['traps_occupied']:\n # All traps have been occupied\n return True, -1, min_index_trap, min_index_particle\n # Not all traps have been occupied, might need to go searching\n return False, len(x), min_index_trap, min_index_particle\n\n def run_experiment(self, duration):\n '''\n Run an experiment for 'duration'.\n Returns 0 if it ran to the end without interruption otherwise it\n returns the amount of time remaining of the experiment.\n '''\n start = time.time()\n\n c_p['recording'] = True\n zoom_in()\n patience = 50\n patience_counter = 0\n while time.time() <= start + duration and c_p['tracking_on']:\n all_filled, nbr_particles, min_index_trap, min_index_particle =\\n self.check_exp_conditions()\n if all_filled:\n patience_counter = 0\n time.sleep(1)\n #print('Experiment is running', self.check_exp_conditions())\n c_p['experiment_runtime'] = np.round(time.time() - start)\n else:\n patience_counter += 1\n if patience_counter > patience:\n break\n zoom_out()\n c_p['recording'] = False\n if time.time() >= start + duration:\n return 0\n return start + duration - time.time()\n\n def run(self):\n '''\n Plan - have an experiment procedure.\n Before each experiment is allowed to start make sure 
all traps\n which are supposed to be filled are filled.\n Then lift the particles and start recording. If any particle\n is dropped go down and refill the traps and continue* the\n experiment.\n In between each experiment (when experiment parameters are to be changed)\n try to move the particles which are already trapped rather than\n catching new ones (unless this is necessary). Then change all desired parameters.\n\n Control the experiments with an experiment dictionary which\n keeps track of the 'c_p' entries that are to be changed during the experiment.\n For instance one might desire to change LGO orders as well as\n particle distance and temperature, then this should be possible.\n\n * Do not record the full length but only the missing part of the video\n '''\n\n # TODO make the program understand when two particles have been trapped\n # in the same trap. - Possible solution: Train an AI to detect this.\n global image\n global c_p\n c_p['nbr_experiments'] = len(c_p['experiment_schedule'])\n c_p['experiment_progress'] = 0\n\n while c_p['program_running']: # Change to continue tracking?\n time.sleep(0.3)\n # Look through the whole schedule, a list of dictionaries.\n\n if c_p['tracking_on']:\n setup_dict = c_p['experiment_schedule'][c_p['experiment_progress']]\n print('Next experiment is', setup_dict)\n run_finished = False\n update_c_p(setup_dict)\n full_xm = copy.copy(c_p['xm']) # for adding one trap at a time.\n full_ym = copy.copy(c_p['ym']) # Using copy since the originals will be modified\n time_remaining = c_p['recording_duration']\n all_filled, nbr_particles, min_index_trap, min_index_particle = self.check_exp_conditions()\n\n # Check if we need to go down and look for more particles in\n # between the experiments\n if all_filled:\n nbr_active_traps = len(full_xm)\n else:\n # Not all traps were filled, need to home and activate\n # only the first trap\n c_p['return_z_home'] = True\n time.sleep(1)\n if c_p['activate_traps_one_by_one']:\n nbr_active_traps = min(3,len(full_xm))\n active_traps_dict = {'xm':full_xm[:nbr_active_traps],\n 'ym':full_ym[:nbr_active_traps]}\n update_c_p(active_traps_dict)\n # Start looking for particles.\n while not run_finished and c_p['tracking_on']:\n time.sleep(0.3)\n # We are (probably) in full frame mode looking for a particle\n\n all_filled, nbr_particles, min_index_trap, min_index_particle = self.check_exp_conditions()\n\n if not all_filled and nbr_particles <= c_p['traps_occupied'].count(True):\n # Fewer particles than traps. Look for more particles.\n c_p['return_z_home'] = True\n search_for_particles()\n\n elif not all_filled and nbr_particles > c_p['traps_occupied'].count(True):\n # Untrapped particles and unfilled traps. 
\n\n\ndef get_adjacency_matrix(nx, ny):\n    '''\n    Function for calculating the adjacency matrix used in graph theory to\n    describe which nodes are neighbours.\n    '''\n    X, Y = np.meshgrid(\n        np.arange(0, nx),\n        np.arange(0, ny)\n    )\n\n    nbr_nodes = nx*ny\n    XF = np.reshape(X, (nbr_nodes, 1))\n    YF = np.reshape(Y, (nbr_nodes, 1))\n    adjacency_matrix = np.zeros((nbr_nodes, nbr_nodes))\n    for idx in range(nx*ny):\n        distance_map = (X - XF[idx])**2 + (Y - YF[idx])**2\n        # A squared distance <= 3 links each node to its 8 surrounding\n        # neighbours (1 for horizontal/vertical, 2 for diagonal).\n        adjacency_matrix[idx, :] = np.reshape(distance_map, (nbr_nodes)) <= 3\n        adjacency_matrix[idx, idx] = 0 # A node is not its own neighbour.\n    return adjacency_matrix
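\n\n\n# Editor's sketch (illustration only, never called): for a tiny 3x2 grid the\n# squared-distance test above links node 0 (a corner) to exactly its three\n# neighbours 1, 3 and 4.\ndef _adjacency_demo():\n    A = get_adjacency_matrix(3, 2)\n    print(A[0]) # expected: [0. 1. 0. 1. 1. 0.]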
\n\n\ndef path_search(filled_traps_locs, target_particle_location,\n                target_trap_location):\n    '''\n    Function for finding paths to move the stage so as to trap more particles\n    without accidentally trapping extra particles.\n    Divides the AOI into a grid and calculates the shortest path to the trap\n    without passing any already occupied traps.\n\n    Parameters\n    ----------\n    filled_traps_locs : list of [x, y] locations of the occupied traps [px],\n        e.g. [[x1, y1], [x2, y2], ...].\n    target_particle_location : [x, y] location of the target particle [px].\n    target_trap_location : [x, y] location of the target trap [px].\n\n    Returns\n    -------\n    move_x, move_y, success\n        The move to make to try and trap the particle without it getting\n        caught in another trap along the way.\n        success is True if the path was not blocked by other particles and a\n        move was found, False otherwise.\n    '''\n    # TODO Make this more efficient. Also make it possible to try and\n    # move in between particles.\n    global c_p\n\n    nx = int((c_p['AOI'][1]-c_p['AOI'][0]) / c_p['cell_width'])\n    ny = int((c_p['AOI'][3]-c_p['AOI'][2]) / c_p['cell_width'])\n    X, Y = np.meshgrid(\n        np.arange(0, nx),\n        np.arange(0, ny)\n    )\n\n    nbr_nodes = nx*ny\n    node_weights = 1e6 * np.ones((nbr_nodes, 1)) # Initial large weights\n    unvisited_set = np.zeros(nbr_nodes) # 1 marks a visited node\n    previous_nodes = -1 * np.ones(nbr_nodes)\n\n    def matrix_to_array_index(x, y, nx):\n        return x + y * nx\n\n    def array_to_matrix_index(idx, nx):\n        y = idx // nx\n        x = np.mod(idx, nx)\n        return x, y\n\n    def loc_to_index(x, y, nx):\n        x = int(x/c_p['cell_width'])\n        y = int(y/c_p['cell_width'])\n        return matrix_to_array_index(x, y, nx)\n\n    adjacency_matrix = get_adjacency_matrix(nx, ny)\n\n    trap_radii = 3\n    for location in filled_traps_locs:\n        print(location)\n        x = location[0] / c_p['cell_width']\n        y = location[1] / c_p['cell_width']\n        distance_map = (X - x)**2 + (Y - y)**2\n        # Block the cells around an occupied trap so the path avoids them.\n        indices = [i for i, e in enumerate(np.reshape(distance_map, (nbr_nodes))) if e < trap_radii]\n        adjacency_matrix[:, indices] = 0\n        node_weights[indices] = 50\n        node_weights[matrix_to_array_index(int(x), int(y), nx)] = 40\n        unvisited_set[matrix_to_array_index(int(x), int(y), nx)] = 1\n\n    target_node = loc_to_index(target_trap_location[0],\n                               target_trap_location[1], nx)\n    current_node = loc_to_index(target_particle_location[0],\n                                target_particle_location[1], nx)\n\n    # Dijkstra:\n    node_weights[current_node] = 0\n    unvisited_set[current_node] = 1\n    previous_nodes[current_node] = 0\n\n    def update_dist(current_node, adjacency_indices, node_weights,\n                    previous_nodes):\n        for index in adjacency_indices:\n            if node_weights[current_node] + 1 < node_weights[index]:\n                node_weights[index] = node_weights[current_node] + 1\n                # All edge weights are 1\n                previous_nodes[index] = current_node\n\n    def find_next_node(unvisited_set, node_weights):\n        res_list = [i for i, value in enumerate(unvisited_set) if value == 0]\n        min_value = 1e6\n        min_idx = -1\n        for index in res_list:\n            if node_weights[index] < min_value:\n                min_idx = index\n                min_value = node_weights[index]\n        return min_idx\n\n    iterations = 0\n    while unvisited_set[target_node] == 0 and iterations <= nbr_nodes:\n        adjacency_indices = [i for i, e in enumerate(adjacency_matrix[current_node, :]) if e == 1]\n        update_dist(current_node, adjacency_indices,\n                    node_weights, previous_nodes)\n        current_node = find_next_node(unvisited_set, node_weights)\n        unvisited_set[current_node] = 1\n        iterations += 1\n\n    # Backtrack from the target to recover the path.\n    previous = target_node\n    prev_x = []\n    prev_y = []\n\n    while previous != 0:\n        node_weights[previous] = -3\n        tmp_x, tmp_y = array_to_matrix_index(previous, nx)\n        prev_x.append(tmp_x)\n        prev_y.append(tmp_y)\n        previous = int(previous_nodes[previous])\n        if previous == -1:\n            break\n\n    if previous == -1:\n        return 0, 0, False\n    elif len(prev_x) > 3:\n        x_move = prev_x[-3] * c_p['cell_width'] - target_particle_location[0]\n        # Should be -2 but that was very slow\n        y_move = prev_y[-3] * c_p['cell_width'] - target_particle_location[1]\n        return x_move, y_move, True\n    else:\n        try:\n            x_move = prev_x[-2] * c_p['cell_width'] - target_particle_location[0]\n            y_move = prev_y[-2] * c_p['cell_width'] - target_particle_location[1]\n            return x_move, y_move, True\n        except IndexError:\n            return 0, 0, False
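\n\n\n# Editor's sketch (illustration only, never called) of how path_search is\n# invoked, mirroring the call in the tracking thread above; the pixel values\n# are placeholders and c_p is assumed to be initialised.\ndef _path_search_demo():\n    occupied = [[100, 100], [200, 100]] # occupied trap positions [px]\n    dx, dy, ok = path_search(occupied,\n                             target_particle_location=[150, 300],\n                             target_trap_location=[300, 100])\n    if ok:\n        print('Move the stage by', dx, dy)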
\n\n\ndef update_c_p(update_dict, wait_for_completion=True):\n    '''\n    Simple function for updating c_p['keys'] with new values 'values'.\n    Ensures that all updates were successful.\n    The parameter wait_for_completion should be set to True if there is a need\n    to wait for the phasemask to finish updating before continuing the program.\n    '''\n    ok_parameters = ['use_LGO', 'LGO_order', 'xm', 'ym', 'zm', 'setpoint_temperature',\n                     'recording_duration', 'target_experiment_z', 'SLM_iterations',\n                     'temperature_output_on', 'activate_traps_one_by_one', 'need_T_stable']\n\n    requires_new_phasemask = ['use_LGO', 'LGO_order', 'xm', 'ym', 'zm', 'SLM_iterations']\n\n    for key in update_dict:\n        if key in ok_parameters:\n            try:\n                # TODO: Test that this works\n                if key == 'xm' and min(update_dict[key]) > 1:\n                    c_p[key] = pixel_to_SLM_loc(update_dict[key], 0)\n                    print('xm', update_dict[key])\n                elif key == 'ym' and min(update_dict[key]) > 1:\n                    c_p[key] = pixel_to_SLM_loc(update_dict[key], 1)\n                    print('ym ', update_dict[key])\n                else:\n                    c_p[key] = update_dict[key]\n            except Exception:\n                print('Could not update control parameter ', key, 'with value',\n                      update_dict[key])\n                return\n        else:\n            print('Invalid key: ', key)\n\n    # Check that both xm and ym are updated\n    if len(c_p['xm']) > len(c_p['ym']):\n        c_p['xm'] = c_p['xm'][:len(c_p['ym'])]\n        print(' WARNING! xm and ym not the same length, cutting off xm!')\n\n    if len(c_p['ym']) > len(c_p['xm']):\n        c_p['ym'] = c_p['ym'][:len(c_p['xm'])]\n        print(' WARNING! xm and ym not the same length, cutting off ym!')\n\n    for key in update_dict:\n        if key in requires_new_phasemask:\n            c_p['new_phasemask'] = True\n\n    # Wait for the new phasemask if the user wishes this\n    while c_p['new_phasemask'] and wait_for_completion:\n        time.sleep(0.3)\n\n    # Await stable temperature\n    while c_p['need_T_stable'] and not c_p['temperature_stable'] and\\\n        c_p['temperature_controller_connected']:\n        time.sleep(0.3)\n\n\ndef count_interior_particles(margin=30):\n    '''\n    Function for counting the number of particles which are at least\n    'margin' pixels away from the edge of the frame.\n    '''\n    global c_p\n    width = c_p['AOI'][1] - c_p['AOI'][0]\n    height = c_p['AOI'][3] - c_p['AOI'][2]\n    interior_particles = 0\n    for x, y in zip(c_p['particle_centers'][0], c_p['particle_centers'][1]):\n        if margin < x < width - margin and margin < y < height - margin:\n            interior_particles += 1\n\n    return interior_particles\n\n\ndef set_AOI(half_image_width=50, left=None, right=None, up=None, down=None):\n    '''\n    Function for changing the Area Of Interest of the camera to the box specified by\n    left, right, up, down.\n    Assumes global access to c_p.\n    '''\n    global c_p\n\n    # Do not want motors to be moving when changing AOI!\n    c_p['motor_locks'][0].acquire()\n    c_p['motor_locks'][1].acquire()\n    # If exact values have been provided for all the corners, change the AOI\n    if c_p['camera_model'] == 'ThorlabsCam':\n        if left is not None and right is not None and up is not None and down is not None:\n            if 0<=left<=1279 and left<=right<=1280 and 0<=up<=1079 and up<=down<=1080:\n                c_p['AOI'][0] = left\n                c_p['AOI'][1] = right\n                c_p['AOI'][2] = up\n                c_p['AOI'][3] = down\n            else:\n                print(\"Trying to set invalid area\")\n    else:\n        if left is not None and right is not None and up is not None and down is not None:\n            if 0<=left<672 and left<=right<=672 and 0<=up<512 and up<=down<=512:\n                c_p['AOI'][0] = left\n                c_p['AOI'][1] = right\n                c_p['AOI'][2] = up\n                c_p['AOI'][3] = down\n            else:\n                print(\"Trying to set invalid area\")\n\n    print('Setting AOI to ', c_p['AOI'])\n\n    # Inform the camera and display thread about the updated AOI\n    c_p['new_AOI_camera'] = True\n    c_p['new_AOI_display'] = True\n\n    # Update trap relative position\n    update_traps_relative_pos()\n\n    # Give motor threads time to catch up\n    time.sleep(0.5)\n    c_p['motor_locks'][0].release()\n    c_p['motor_locks'][1].release()
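\n\n\n# Editor's sketch (illustration only, never called): typical set_AOI calls,\n# assuming the Basler model's 672x512 sensor bounds checked above.\ndef _set_AOI_demo():\n    set_AOI(left=0, right=672, up=0, down=512)      # full frame\n    set_AOI(left=160, right=320, up=128, down=256)  # zoomed-in region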
\n\n\ndef find_focus():\n    \"\"\"\n    Function which uses the laser to find a focus point.\n    \"\"\"\n    # Idea - Search for a focus, first down 1000 ticks then up 2000 ticks\n    # from the down position. Take small steps: 10-20 per step. Between each\n    # step check if the intensity around the traps has increased enough.\n    # TODO: this is an unfinished stub; the original looped forever.\n    raise NotImplementedError('find_focus has not been implemented yet')\n\n\ndef in_focus(margin=40):\n    '''\n    Function for determining if an image is in focus by looking at the intensity\n    close to the trap positions and comparing it to the image median.\n    # Highly recommended to change to only one trap before using this function.\n    # This function is also very unreliable. Better to use the old method +\n    # deep learning I believe.\n    '''\n\n    global image\n    global c_p\n    median_intensity = np.median(image)\n\n    if c_p['camera_model'] == 'basler':\n        image_limits = [672, 512]\n    else:\n        image_limits = [1200, 1000]\n\n    left = int(max(min(c_p['traps_absolute_pos'][0]) - margin, 0))\n    right = int(min(max(c_p['traps_absolute_pos'][0]) + margin, image_limits[0]))\n    up = int(max(min(c_p['traps_absolute_pos'][1]) - margin, 0))\n    down = int(min(max(c_p['traps_absolute_pos'][1]) + margin, image_limits[1]))\n\n    # (left - right) and (up - down) are both negative, so the product is the\n    # (positive) number of pixels in the region times the median.\n    expected_median = (left - right) * (up - down) * median_intensity\n    actual_median = np.sum(image[left:right, up:down])\n\n    print(median_intensity, actual_median)\n    print(expected_median + c_p['focus_threshold'])\n    if actual_median > (expected_median + c_p['focus_threshold']):\n        return True\n    return False\n\n\ndef predict_particle_position_network(network, half_image_width=50,\n                                      network_image_width=101,\n                                      print_position=False):\n    '''\n    Function for making a prediction with a network and automatically updating the center position array.\n    Inputs :\n        network - the network to predict with. Trained with deeptrack.\n        half_image_width - half the width of the image. Needed to convert from deeptrack output to pixels.\n        network_image_width - image width that the network expects.\n        print_position - whether the predicted position should be printed in the console.\n    Outputs :\n        Updates the center position of the particle.\n    '''\n    global image\n    global c_p\n    resized = cv2.resize(copy.copy(image), (network_image_width, network_image_width), interpolation=cv2.INTER_AREA)\n    pred = network.predict(np.reshape(resized/255, [1, network_image_width, network_image_width, 1]))\n\n    c_p['target_particle_center'][0] = half_image_width + pred[0][1] * half_image_width\n    c_p['target_particle_center'][1] = half_image_width + pred[0][0] * half_image_width\n\n    if print_position:\n        print('Predicted position is ', c_p['particle_centers'])\n\n\ndef get_particle_trap_distances():\n    '''\n    Calculates the distance between all particles and traps and returns a\n    distance matrix, ordered as distances(traps, particles).\n    To clarify: the distance between trap n and particle m is distances[n][m].\n    '''\n    global c_p\n    update_traps_relative_pos() # Just in case\n    nbr_traps = len(c_p['traps_relative_pos'][0])\n    nbr_particles = len(c_p['particle_centers'][0])\n    distances = np.ones((nbr_traps, nbr_particles))\n\n    for i in range(nbr_traps):\n        for j in range(nbr_particles):\n            dx = (c_p['traps_relative_pos'][0][i] - c_p['particle_centers'][0][j])\n            dy = (c_p['traps_relative_pos'][1][i] - c_p['particle_centers'][1][j])\n            distances[i, j] = np.sqrt(dx * dx + dy * dy)\n    return distances
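\n\n\n# Editor's note: the double loop above is fine for a handful of traps, but\n# the same matrix can be computed in one step with numpy broadcasting. A\n# sketch (illustration only, never called; assumes numpy as np, as elsewhere\n# in this script):\ndef _distances_vectorized(traps_xy, particles_xy):\n    tx = np.asarray(traps_xy[0])[:, None]      # trap x as a column\n    ty = np.asarray(traps_xy[1])[:, None]      # trap y as a column\n    px = np.asarray(particles_xy[0])[None, :]  # particle x as a row\n    py = np.asarray(particles_xy[1])[None, :]  # particle y as a row\n    return np.sqrt((tx - px)**2 + (ty - py)**2)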
\n\n\ndef trap_occupied(distances, trap_index):\n    '''\n    Checks if a specific trap is occupied by a particle. If so, sets that trap to occupied.\n    Updates whether the trap is occupied or not and returns the index of the particle in the trap.\n    '''\n    global c_p\n\n    # Check that the trap index is ok\n    if trap_index >= len(c_p['traps_occupied']) or trap_index < 0:\n        print('Trap index out of range')\n        return None\n    for i in range(len(distances[trap_index, :])):\n        dist_to_trap = distances[trap_index, i]\n        if dist_to_trap <= c_p['movement_threshold']:\n            c_p['traps_occupied'][trap_index] = True\n            return i\n    try:\n        c_p['traps_occupied'][trap_index] = False\n        return None\n    except IndexError:\n        print(\" Indexing error for trap index\", str(trap_index),\n              \" length is \", len(c_p['traps_occupied']))\n        return None\n\n\ndef check_all_traps(distances=None):\n    '''\n    Updates all traps to see if they are occupied.\n    Returns the indices of the particles which are trapped. Indices refer to their\n    position in the c_p['particle_centers'] array.\n    Returns an empty array if there are no trapped particles.\n    '''\n    if distances is None:\n        distances = get_particle_trap_distances()\n    trapped_particle_indices = []\n    for trap_index in range(len(distances)):\n        trapped_particle_index = trap_occupied(distances, trap_index)\n        if trapped_particle_index is not None:\n            trapped_particle_indices.append(trapped_particle_index)\n    return trapped_particle_indices\n\n\ndef find_closest_unoccupied():\n    '''\n    Function for finding the particle and (unoccupied) trap which are the closest.\n    Returns : min_index_trap, min_index_particle.\n    Index of the untrapped particle closest to an unoccupied trap.\n    '''\n\n    distances = get_particle_trap_distances()\n    trapped_particles = check_all_traps(distances)\n    distances[:, trapped_particles] = 1e6 # These indices are not ok, put them very large\n\n    min_distance = 2000 # If the particle is not within 2000 pixels then it is not within the frame\n    min_index_particle = None # Index of the particle which is closest to an unoccupied trap\n    min_index_trap = None # Index of the unoccupied trap which is closest to a particle\n\n    # Check all the traps\n    for trap_idx in range(len(c_p['traps_occupied'])):\n        trapped = c_p['traps_occupied'][trap_idx]\n\n        # If there is no particle trapped in this trap, check for the closest particle\n        if not trapped and len(distances[0]) > 0:\n            # Had problems with distances being [] if there were no particles\n            particle_idx = np.argmin(distances[trap_idx])\n\n            # If the particle is within the threshold then update min_index and trap index as well as min distance\n            if distances[trap_idx, particle_idx] < min_distance:\n                min_distance = distances[trap_idx, particle_idx]\n                min_index_trap = trap_idx\n                min_index_particle = particle_idx\n\n    return min_index_trap, min_index_particle
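\n\n\n# Editor's sketch (illustration only, never called) of the masking trick\n# used above: columns of already-trapped particles are set to a huge value\n# so argmin can only select free particles. The numbers are made up.\ndef _closest_free_particle_demo():\n    d = np.array([[10., 500., 80.],\n                  [300., 20., 40.]]) # distances(traps, particles)\n    d[:, [0]] = 1e6                  # pretend particle 0 is already trapped\n    print(int(np.argmin(d[1])))      # -> 1, closest free particle to trap 1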
set to\",c_p['temperature_output_on'])\n\n\ndef toggle_bright_particle():\n '''\n Function for switching between bright and other particle\n '''\n c_p['bright_particle'] = not c_p['bright_particle']\n print(\"c_p['bright_particle'] set to\",c_p['bright_particle'])\n\n\ndef toggle_tracking():\n c_p['tracking_on'] = not c_p['tracking_on']\n print(\"Tracking is \",c_p['tracking_on'])\n\n\ndef focus_up():\n '''\n Used for focus button to shift focus slightly up\n '''\n c_p['z_starting_position'] += 5\n\n\ndef focus_down():\n '''\n Used for focus button to shift focus slightly up\n '''\n c_p['z_starting_position'] -= 5\n\n\ndef zoom_in(margin=60, use_traps=False):\n '''\n Helper function for zoom button and zoom function.\n Zooms in on an area around the traps\n '''\n if c_p['camera_model'] == 'ThorlabsCam':\n left = max(min(c_p['traps_absolute_pos'][0]) - margin, 0)\n left = int(left // 20 * 20)\n right = min(max(c_p['traps_absolute_pos'][0]) + margin, 1200)\n right = int(right // 20 * 20)\n up = max(min(c_p['traps_absolute_pos'][1]) - margin, 0)\n up = int(up // 20 * 20)\n down = min(max(c_p['traps_absolute_pos'][1]) + margin, 1000)\n down = int(down // 20 * 20)\n else:\n left = max(min(c_p['traps_absolute_pos'][0]) - margin, 0)\n left = int(left // 16 * 16)\n right = min(max(c_p['traps_absolute_pos'][0]) + margin, 672)\n right = int(right // 16 * 16)\n up = max(min(c_p['traps_absolute_pos'][1]) - margin, 0)\n up = int(up // 16 * 16)\n down = min(max(c_p['traps_absolute_pos'][1]) + margin, 512)\n down = int(down // 16 * 16)\n\n c_p['framerate'] = 500\n # Note calculated framerate is automagically saved.\n set_AOI(left=left, right=right, up=up, down=down)\n\n\ndef zoom_out():\n if c_p['camera_model'] == 'ThorlabsCam':\n set_AOI(left=0, right=1200, up=0, down=1000)\n c_p['framerate'] = 500\n else:\n set_AOI(left=0, right=672, up=0, down=512)\n\n\ndef search_for_particles():\n '''\n Function for searching after particles. 
\n\n\ndef search_for_particles():\n    '''\n    Function for searching for particles. Treats the sample as a grid and\n    systematically searches it.\n    '''\n    x_max = 0.005 # [mm]\n    delta_y = 3 # [mm]\n    # Make movement\n    if c_p['search_direction'] == 'right':\n        c_p['motor_movements'][0] = 300 # [px]\n\n    elif c_p['search_direction'] == 'left':\n        c_p['motor_movements'][0] = -300\n\n    elif c_p['search_direction'] == 'up':\n        c_p['motor_movements'][1] = 300\n\n    elif c_p['search_direction'] == 'down': # Currently not used\n        c_p['motor_movements'][1] = -300\n\n    if c_p['search_direction'] == 'up' and \\\n        (c_p['motor_current_pos'][1]-c_p['motor_starting_pos'][1]) > delta_y:\n        c_p['search_direction'] = 'right'\n        c_p['x_start'] = c_p['motor_current_pos'][0]\n        print('changing search direction to right, x_start is ', c_p['x_start'])\n\n    if c_p['search_direction'] == 'right' and \\\n        (c_p['motor_current_pos'][0]-c_p['x_start']) > x_max:\n\n        if c_p['motor_current_pos'][1] - c_p['motor_starting_pos'][1] > delta_y/2:\n            c_p['search_direction'] = 'down'\n            print('changing search direction to down')\n        else:\n            c_p['search_direction'] = 'up'\n            print('changing search direction to up')\n    if c_p['search_direction'] == 'down' \\\n        and (c_p['motor_current_pos'][1] - c_p['motor_starting_pos'][1]) < 0:\n        c_p['search_direction'] = 'right'\n        print('changing search direction to right')\n\n\ndef update_traps_relative_pos():\n    global c_p\n    tmp_x = [x - c_p['AOI'][0] for x in c_p['traps_absolute_pos'][0]]\n    tmp_y = [y - c_p['AOI'][2] for y in c_p['traps_absolute_pos'][1]]\n    tmp = np.asarray([tmp_x, tmp_y])\n    c_p['traps_relative_pos'] = tmp\n\n\ndef pixel_to_SLM_loc(locs, axis):\n    '''\n    Function for converting from PIXELS to SLM locations.\n    '''\n    global c_p\n    if axis != 0 and axis != 1:\n        print('cannot perform conversion, incorrect choice of axis')\n        return locs\n    offset = c_p['slm_x_center'] if not axis else c_p['slm_y_center']\n    new_locs = [((x - offset) / c_p['slm_to_pixel']) for x in locs]\n    return new_locs\n\n\ndef SLM_loc_to_trap_loc(xm, ym):\n    '''\n    Function for updating the traps' positions based on their locations\n    on the SLM.\n    '''\n    global c_p\n    tmp_x = [x * c_p['slm_to_pixel'] + c_p['slm_x_center'] for x in xm]\n    tmp_y = [y * c_p['slm_to_pixel'] + c_p['slm_y_center'] for y in ym]\n    tmp = np.asarray([tmp_x, tmp_y])\n    c_p['traps_absolute_pos'] = tmp\n    print('Traps are at: ', c_p['traps_absolute_pos'])\n    update_traps_relative_pos()\n\n\ndef move_particles_slowly(last_d=30e-6):\n    # Function for moving the particles between the center and the edges\n    # without dropping them\n    global c_p\n    if last_d > 40e-6 or last_d < 0:\n        print('Too large distance.')\n        return\n    if last_d > c_p['trap_separation']:\n        while c_p['trap_separation'] < last_d:\n            if c_p['new_phasemask'] == False:\n                if last_d - c_p['trap_separation'] < 1e-6:\n                    c_p['trap_separation'] = last_d\n                else:\n                    c_p['trap_separation'] += 1e-6\n                c_p['new_phasemask'] = True\n                print(c_p['trap_separation'])\n                time.sleep(0.5)\n    else:\n        while c_p['trap_separation'] > last_d:\n            if c_p['new_phasemask'] == False:\n                if c_p['trap_separation'] - last_d < 1e-6:\n                    c_p['trap_separation'] = last_d\n                else:\n                    c_p['trap_separation'] -= 1e-6\n                c_p['new_phasemask'] = True\n                print(c_p['trap_separation'])\n                time.sleep(0.5)\n    time.sleep(1)\n    return
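\n\n\n# Editor's sketch (illustration only, never called): pixel_to_SLM_loc and\n# SLM_loc_to_trap_loc above are inverse transforms; a quick round-trip\n# self-check, assuming c_p has been initialised with the SLM calibration.\ndef _slm_pixel_roundtrip_demo():\n    xs = [100.0, 300.0] # made-up pixel x-coordinates\n    back = [x * c_p['slm_to_pixel'] + c_p['slm_x_center']\n            for x in pixel_to_SLM_loc(xs, 0)]\n    print(back) # expected: approximately [100.0, 300.0]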
\n\n\n############### Main script starts here ####################################\nc_p = get_default_c_p()\n# Create camera and set defaults\nglobal image\nif c_p['camera_model'] == 'ThorlabsCam':\n    image = np.zeros((c_p['AOI'][1]-c_p['AOI'][0], c_p['AOI'][3]-c_p['AOI'][2], 1))\nelse:\n    image = np.zeros((672, 512, 1))\n\n# Create an empty list to put the threads in\nthread_list = []\nd0x = -80e-6\nd0y = -80e-6\n\n# Define the experiment to be run\n\nxm1, ym1 = SLM.get_xm_ym_rect(nbr_rows=2, nbr_columns=1, d0x=d0x, d0y=d0y, dx=20e-6, dy=20e-6,)\n# xm2, ym2 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=12e-6, dy=20e-6,)\n# xm3, ym3 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=14e-6, dy=20e-6,)\n# xm4, ym4 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=16e-6, dy=20e-6,)\n# xm5, ym5 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=18e-6, dy=20e-6,)\n# xm6, ym6 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=20e-6, dy=20e-6,)\n# xm7, ym7 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=22e-6, dy=20e-6,)\n# xm8, ym8 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=24e-6, dy=20e-6,)\n# xm9, ym9 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=26e-6, dy=20e-6,)\n# xm10, ym10 = SLM.get_xm_ym_rect(nbr_rows=1, nbr_columns=2, d0x=d0x, d0y=d0y, dx=30e-6, dy=20e-6,)\n\n\nexperiment_schedule = [\n{'xm':xm1, 'ym':ym1, 'use_LGO':[False],'target_experiment_z':1000,\n'LGO_order':4, 'recording_duration':1000,'SLM_iterations':30,'activate_traps_one_by_one':False}, # Should use few iterations when we only have 2 traps\n# {'LGO_order':-4,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':8,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':-8,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':12,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':-12,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':16,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n# {'LGO_order':-16,'xm':xm1, 'ym':ym1, 'use_LGO':[True]},\n\n# {'xm':xm3, 'ym':ym3, 'use_LGO':[False,]},\n# {'xm':xm4, 'ym':ym4, 'use_LGO':[False,]},\n# {'xm':xm5, 'ym':ym5, 'use_LGO':[False,]},\n# {'xm':xm6, 'ym':ym6, 'use_LGO':[False,]},\n# {'xm':xm7, 'ym':ym7, 'use_LGO':[False,]},\n# {'xm':xm8, 'ym':ym8, 'use_LGO':[False,]},\n# {'xm':xm9, 'ym':ym9, 'use_LGO':[False,]},\n# {'xm':xm10, 'ym':ym10, 'use_LGO':[False,]},\n\n]\n\nc_p['experiment_schedule'] = experiment_schedule\nT_D = UserInterface(tkinter.Tk(), \"Control display\")\n\nsys.exit()\n", "sub_path": "automagic_experiments_0_5.py", "file_name": "automagic_experiments_0_5.py", "file_ext": "py", "file_size_in_byte": 86452, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "code-starcoder2", "pt": "62", "api": [{"api_name": "datetime.datetime.now", "line_number": 22, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 22, "usage_type": "name"}, {"api_name": "os.mkdir", "line_number": 28, "usage_type": "call"}, {"api_name": "threading.Lock", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 142, "usage_type": "call"}, {"api_name": "SLM.get_xm_ym_rect", "line_number": 149, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 152, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 158, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 162, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 178, "usage_type": "call"}, {"api_name": "TemperatureControllerTED4015.TED4015", "line_number": 242, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 252, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 269, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 269, "usage_type": "attribute"}, {"api_name": "SLM.get_default_xm_ym", "line_number": 283, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 284, "usage_type": "call"}, {"api_name": "SLM.get_delta", "line_number": 285, "usage_type": "call"}, 
{"api_name": "SLM.GSW", "line_number": 289, "usage_type": "call"}, {"api_name": "SLM.get_delta", "line_number": 303, "usage_type": "call"}, {"api_name": "SLM.GS", "line_number": 309, "usage_type": "call"}, {"api_name": "SLM.GSW", "line_number": 313, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 324, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 327, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 351, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 351, "usage_type": "attribute"}, {"api_name": "TemperatureControllerTED4015.TED4015_controller", "line_number": 368, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 402, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 415, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 432, "usage_type": "call"}, {"api_name": "tkinter.Canvas", "line_number": 436, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 439, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 441, "usage_type": "call"}, {"api_name": "tkinter.filedialog.askopenfilename", "line_number": 466, "usage_type": "call"}, {"api_name": "tkinter.filedialog", "line_number": 466, "usage_type": "name"}, {"api_name": "read_dict_from_file.ReadFileToExperimentList", "line_number": 470, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 477, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 479, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 498, "usage_type": "call"}, {"api_name": "instrumental.u", "line_number": 503, "usage_type": "name"}, {"api_name": "instrumental.u", "line_number": 520, "usage_type": "name"}, {"api_name": "instrumental.u", "line_number": 527, "usage_type": "name"}, {"api_name": "instrumental.u", "line_number": 528, "usage_type": "name"}, {"api_name": "instrumental.u", "line_number": 529, "usage_type": "name"}, {"api_name": "tkinter.Button", "line_number": 557, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 558, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 559, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 560, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 561, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 562, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 563, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 564, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 565, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 567, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 569, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 573, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 574, "usage_type": "call"}, {"api_name": "tkinter.Entry", "line_number": 575, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 577, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 632, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 634, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 636, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 638, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 640, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 641, "usage_type": "call"}, 
{"api_name": "tkinter.Button", "line_number": 642, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 644, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 645, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 651, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 653, "usage_type": "call"}, {"api_name": "tkinter.Button", "line_number": 655, "usage_type": "call"}, {"api_name": "tkinter.Toplevel", "line_number": 695, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 701, "usage_type": "call"}, {"api_name": "time.strftime", "line_number": 702, "usage_type": "call"}, {"api_name": "cv2.cvtColor", "line_number": 703, "usage_type": "call"}, {"api_name": "cv2.COLOR_RGB2BGR", "line_number": 703, "usage_type": "attribute"}, {"api_name": "numpy.shape", "line_number": 803, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 811, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 811, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 821, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 821, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 821, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 821, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 821, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 822, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 826, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 826, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 826, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 826, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 826, "usage_type": "attribute"}, {"api_name": "tkinter.NW", "line_number": 827, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 841, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 841, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 841, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 841, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 841, "usage_type": "attribute"}, {"api_name": "PIL.Image.ImageTk.PhotoImage", "line_number": 850, "usage_type": "call"}, {"api_name": "PIL.Image.ImageTk", "line_number": 850, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 850, "usage_type": "name"}, {"api_name": "PIL.Image.Image.fromarray", "line_number": 850, "usage_type": "call"}, {"api_name": "PIL.Image.Image", "line_number": 850, "usage_type": "attribute"}, {"api_name": "threading.Thread", "line_number": 857, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 865, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 865, "usage_type": "attribute"}, {"api_name": "ThorlabsMotor.InitiateMotor", "line_number": 873, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 896, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 898, "usage_type": "call"}, {"api_name": "numpy.abs", "line_number": 901, "usage_type": "call"}, {"api_name": "ThorlabsMotor.MoveMotorPixels", "line_number": 902, "usage_type": "call"}, {"api_name": "ThorlabsMotor.MoveMotorPixels", "line_number": 905, "usage_type": "call"}, {"api_name": 
"ThorlabsMotor.MoveMotorPixels", "line_number": 907, "usage_type": "call"}, {"api_name": "ThorlabsMotor.DisconnectMotor", "line_number": 912, "usage_type": "call"}, {"api_name": "ThorlabsMotor.InitiateMotor", "line_number": 917, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 925, "usage_type": "call"}, {"api_name": "ThorlabsMotor.DisconnectMotor", "line_number": 927, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 943, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 949, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 949, "usage_type": "attribute"}, {"api_name": "ThorlabsMotor.PiezoMotor", "line_number": 953, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 996, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 1000, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 1003, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 1003, "usage_type": "attribute"}, {"api_name": "ThorlabsCam.get_camera", "line_number": 1009, "usage_type": "call"}, {"api_name": "ThorlabsCam.find_exposure_time", "line_number": 1011, "usage_type": "call"}, {"api_name": "pypylon.pylon.TlFactory.GetInstance", "line_number": 1015, "usage_type": "call"}, {"api_name": "pypylon.pylon.TlFactory", "line_number": 1015, "usage_type": "attribute"}, {"api_name": "pypylon.pylon", "line_number": 1015, "usage_type": "name"}, {"api_name": "pypylon.pylon.InstantCamera", "line_number": 1016, "usage_type": "call"}, {"api_name": "pypylon.pylon", "line_number": 1016, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 1018, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 1049, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 1049, "usage_type": "name"}, {"api_name": "cv2.VideoWriter_fourcc", "line_number": 1050, "usage_type": "call"}, {"api_name": "cv2.VideoWriter", "line_number": 1058, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1086, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1108, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1117, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 1132, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 1133, "usage_type": "call"}, {"api_name": "pypylon.pylon.PylonImage", "line_number": 1154, "usage_type": "call"}, {"api_name": "pypylon.pylon", "line_number": 1154, "usage_type": "name"}, {"api_name": "time.time", "line_number": 1173, "usage_type": "call"}, {"api_name": "numpy.flip", "line_number": 1185, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1197, "usage_type": "call"}, {"api_name": "pickle.dump", "line_number": 1208, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 1218, "usage_type": "attribute"}, {"api_name": "threading.Thread.__init__", "line_number": 1223, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 1223, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 1279, "usage_type": "call"}, {"api_name": "find_particle_threshold.find_particle_centers", "line_number": 1301, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1301, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1306, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1324, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1330, "usage_type": "call"}, 
{"api_name": "time.sleep", "line_number": 1335, "usage_type": "call"}, {"api_name": "numpy.round", "line_number": 1337, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1337, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1344, "usage_type": "call"}, {"api_name": "time.time", "line_number": 1346, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1376, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1384, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1385, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1397, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1405, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 1450, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1451, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1452, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 1456, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 1457, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1458, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 1461, "usage_type": "call"}, {"api_name": "numpy.meshgrid", "line_number": 1499, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1500, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 1501, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1506, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 1507, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1508, "usage_type": "call"}, {"api_name": "numpy.mod", "line_number": 1515, "usage_type": "call"}, {"api_name": "numpy.reshape", "line_number": 1531, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1651, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1656, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 1713, "usage_type": "call"}, {"api_name": "numpy.median", "line_number": 1746, "usage_type": "call"}, {"api_name": "numpy.sum", "line_number": 1760, "usage_type": "call"}, {"api_name": "cv2.resize", "line_number": 1784, "usage_type": "call"}, {"api_name": "copy.copy", "line_number": 1784, "usage_type": "call"}, {"api_name": "cv2.INTER_AREA", "line_number": 1784, "usage_type": "attribute"}, {"api_name": "numpy.reshape", "line_number": 1785, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 1804, "usage_type": "call"}, {"api_name": "numpy.sqrt", "line_number": 1810, "usage_type": "call"}, {"api_name": "numpy.argmin", "line_number": 1878, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 2049, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 2073, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 2095, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 2096, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 2106, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 2107, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2116, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 2118, "usage_type": "call"}, {"api_name": "SLM.get_xm_ym_rect", "line_number": 2127, "usage_type": "call"}, {"api_name": "tkinter.Tk", "line_number": 2162, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 2164, "usage_type": "call"}]}