diff --git "a/071.jsonl" "b/071.jsonl" new file mode 100644--- /dev/null +++ "b/071.jsonl" @@ -0,0 +1,278 @@ +{"seq_id": "17113956884", "text": "# -*- coding: utf-8 -*-\n\"\"\"\n tests/api/promo_codes_endpoints_test\n ~~~~~\n\n PromoCodes API endpoints tests\n\"\"\"\nfrom hamcrest import *\nimport mock\n\nimport json\n\nfrom tests.conftest import mock_jwt_required\nfrom videona_platform.promo_codes import models as promo_codes_models\nfrom videona_platform.api.promo_codes import validate_promo_code\nfrom videona_platform.promo_codes.promo_codes_service import PromoCodeValidationError\n\n\nclass TestPromoCodesEndpoints(object):\n @mock.patch('videona_platform.api.promo_codes.jsonify', mock.Mock())\n @mock.patch('flask_jwt._jwt_required', mock.Mock(side_effect=mock_jwt_required))\n @mock.patch('videona_platform.api.promo_codes.current_identity')\n @mock.patch('videona_platform.promo_codes.promo_codes_service.promo_codes_service.validate_code')\n def test_validate_calls_service(self, validate_code, current_identity, push_context):\n\n validate_promo_code('kode')\n\n validate_code.assert_called_once_with('kode', current_identity)\n\n @mock.patch('flask_jwt._jwt_required', mock.Mock(side_effect=mock_jwt_required))\n @mock.patch('videona_platform.promo_codes.promo_codes_service.promo_codes_service.first')\n def test_validate_returns_error_if_no_code(self, first, api_app):\n with api_app.test_request_context():\n first.return_value = None\n\n response, status_code = validate_promo_code('notfoundcode')\n\n assert_that(status_code, is_(404))\n assert_that(json.loads(response.data), is_({'valid_code': False, 'campaign': '', 'error': PromoCodeValidationError.MSG_CODE_NOT_FOUND}))\n\n @mock.patch('flask_jwt._jwt_required', mock.Mock(side_effect=mock_jwt_required))\n @mock.patch('videona_platform.api.promo_codes.current_identity', None)\n @mock.patch('videona_platform.promo_codes.promo_codes_service.promo_codes_service.first')\n def test_validate_returns_valid_code_response_if_code_validates(self, first, session, api_app):\n with api_app.test_request_context():\n code = promo_codes_models.PromoCode(code='code', campaign='wolder')\n first.return_value = code\n\n response, status_code = validate_promo_code(code_string='code')\n\n assert_that(status_code, is_(200))\n assert_that(json.loads(response.data), is_({'valid_code': True, 'campaign': 'wolder'}))\n", "repo_name": "IAgof/VideonaPlatform", "sub_path": "tests/api/promo_codes_endpoints_test.py", "file_name": "promo_codes_endpoints_test.py", "file_ext": "py", "file_size_in_byte": 2350, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "videona_platform.api.promo_codes.validate_promo_code", "line_number": 26, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 20, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 20, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 21, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 21, "usage_type": "call"}, {"api_name": "tests.conftest.mock_jwt_required", "line_number": 21, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 22, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 23, "usage_type": "call"}, {"api_name": "videona_platform.api.promo_codes.validate_promo_code", "line_number": 36, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 39, "usage_type": "call"}, {"api_name": 
"videona_platform.promo_codes.promo_codes_service.PromoCodeValidationError.MSG_CODE_NOT_FOUND", "line_number": 39, "usage_type": "attribute"}, {"api_name": "videona_platform.promo_codes.promo_codes_service.PromoCodeValidationError", "line_number": 39, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 30, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 30, "usage_type": "call"}, {"api_name": "tests.conftest.mock_jwt_required", "line_number": 30, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 31, "usage_type": "call"}, {"api_name": "videona_platform.promo_codes.models.PromoCode", "line_number": 46, "usage_type": "call"}, {"api_name": "videona_platform.promo_codes.models", "line_number": 46, "usage_type": "name"}, {"api_name": "videona_platform.api.promo_codes.validate_promo_code", "line_number": 49, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 52, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 41, "usage_type": "call"}, {"api_name": "mock.Mock", "line_number": 41, "usage_type": "call"}, {"api_name": "tests.conftest.mock_jwt_required", "line_number": 41, "usage_type": "name"}, {"api_name": "mock.patch", "line_number": 42, "usage_type": "call"}, {"api_name": "mock.patch", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "19120828623", "text": "import sys\n\nfrom mastodon import Mastodon\n\nfrom oabot.post import create_replies\nfrom oabot.extract import EMAIL\n\n\nif __name__ == \"__main__\":\n if EMAIL is None:\n raise TypeError(\n \"Set the EMAIL environment variable to your email \"\n \"address for polite usage of the APIs.\"\n )\n if len(sys.argv) != 2:\n raise ValueError(\"Need to provide a Mastodon status ID\")\n try:\n status_id = int(sys.argv[1])\n except ValueError:\n raise ValueError(\"Mastodon status ID must be an integer\")\n m = Mastodon(api_base_url=\"https://neuromatch.social\")\n replies = create_replies(m.status(status_id).content)\n if not replies:\n print(\"No replies to be made\")\n else:\n for line in replies:\n print(line)\n", "repo_name": "mstimberg/openaccess_mastodon_bot", "sub_path": "run_mastodon.py", "file_name": "run_mastodon.py", "file_ext": "py", "file_size_in_byte": 781, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "oabot.extract.EMAIL", "line_number": 10, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 15, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "mastodon.Mastodon", "line_number": 21, "usage_type": "call"}, {"api_name": "oabot.post.create_replies", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "26064200135", "text": "from django import template\nfrom django.conf import settings\nfrom django.utils.html import conditional_escape\nfrom django.utils.safestring import mark_safe\nfrom archives.models import LEVEL_STATUS_CHOICES\n\nregister = template.Library()\n\n\n@register.filter(name=\"whitehat_tag\", need_autoescape=True)\ndef whitehat_level_tag_filter(name, autoescape=True):\n colors = dict((_[1][1], _[2]) for _ in settings.USER_LEVEL_RANGE)\n if autoescape:\n name = conditional_escape(name)\n return mark_safe('{value}'.format(color=colors[name], value=name))\n\n\n@register.filter(name=\"post_tag\", need_autoescape=True)\ndef post_level_tag_filter(value, autoescape=True):\n levels = (_[1] for _ in LEVEL_STATUS_CHOICES)\n colors = dict(zip(levels, ('secondary', 'primary', 'warning', 
'danger')))\n\n if autoescape:\n value = conditional_escape(value)\n return mark_safe('{value}'.format(color=colors[value], value=value))\n\n\n@register.filter(name=\"post_tag_color\", is_safe=True)\ndef post_level_tag_color_filter(value):\n levels = (_[0] for _ in LEVEL_STATUS_CHOICES)\n colors = dict(zip(levels, ('secondary', 'primary', 'warning', 'danger')))\n\n return colors[value]\n\n\n@register.filter(name=\"css\", is_safe=True)\ndef css_filter(form, css):\n if 'class' in form.field.widget.attrs:\n form.field.widget.attrs['class'] += \" %s\" % css\n else:\n form.field.widget.attrs['class'] = css\n\n return form\n\n\n@register.filter(name=\"placeholder\", is_safe=True)\ndef placeholder_filter(form, default=\"\"):\n text = default if default else form.label\n if 'placeholder' not in form.field.widget.attrs:\n form.field.widget.attrs['placeholder'] = text\n\n return form\n\n\n@register.filter(name=\"first_error\", is_safe=True)\ndef first_error_filter(errors):\n if not errors:\n return errors\n\n if 'captcha' in errors:\n data = errors['captcha'].as_text()\n else:\n data = errors.get(tuple(errors)[0]).as_text()\n\n return data\n\n\n@register.filter\ndef level_progress_bar(rank):\n total = rank\n for index, level in enumerate(settings.USER_LEVEL_RANGE):\n if level[0][0] <= rank < level[0][1] and index < len(settings.USER_LEVEL_RANGE) - 1:\n total = level[0][1]\n break\n\n return rank / total * 100", "repo_name": "phith0n/mooder", "sub_path": "archives/templatetags/template_helper.py", "file_name": "template_helper.py", "file_ext": "py", "file_size_in_byte": 2358, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 746, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.template.Library", "line_number": 7, "usage_type": "call"}, {"api_name": "django.template", "line_number": 7, "usage_type": "name"}, {"api_name": "django.conf.settings.USER_LEVEL_RANGE", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.utils.html.conditional_escape", "line_number": 14, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 15, "usage_type": "call"}, {"api_name": "archives.models.LEVEL_STATUS_CHOICES", "line_number": 20, "usage_type": "name"}, {"api_name": "django.utils.html.conditional_escape", "line_number": 24, "usage_type": "call"}, {"api_name": "django.utils.safestring.mark_safe", "line_number": 25, "usage_type": "call"}, {"api_name": "archives.models.LEVEL_STATUS_CHOICES", "line_number": 30, "usage_type": "name"}, {"api_name": "django.conf.settings.USER_LEVEL_RANGE", "line_number": 71, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 71, "usage_type": "name"}, {"api_name": "django.conf.settings.USER_LEVEL_RANGE", "line_number": 72, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 72, "usage_type": "name"}]} +{"seq_id": "8315104074", "text": "from django.urls import path\nfrom . 
import views\n\napp_name = 'contacts'\n\nurlpatterns = [\n path('add/', views.add, name=\"add\"),\n path('contacts/', views.ContactList.as_view()),\n path('finance/', views.FinanceList.as_view()),\n]", "repo_name": "rsgilbert/contactsAPI", "sub_path": "contacts/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 234, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.urls.path", "line_number": 7, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 8, "usage_type": "call"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}]} +{"seq_id": "11182192010", "text": "from discord.ext import commands\nfrom DaveBOT import checks\n\n\nclass Admin:\n \"\"\"Admin-only commands.\"\"\"\n def __init__(self, bot):\n self.client = bot\n\n @commands.command(hidden=True)\n @checks.adminonly()\n async def load(self, *, module: str):\n \"\"\"Load a module.\"\"\"\n try:\n self.client.load_extension(module)\n except Exception as e:\n await self.client.say(f\"{type(e).__name__}: {e}\")\n else:\n await self.client.say(\"Module loaded.\")\n\n @commands.command(hidden=True)\n @checks.adminonly()\n async def unload(self, *, module: str):\n \"\"\"Unload a module.\"\"\"\n try:\n self.client.unload_extension(module)\n except Exception as e:\n await self.client.say(f\"{type(e).__name__}: {e}\")\n else:\n await self.client.say(\"Module unloaded.\")\n\n @commands.command(hidden=True)\n @checks.adminonly()\n async def reload(self, *, module: str):\n \"\"\"Reload a module.\"\"\"\n try:\n self.client.unload_extension(module)\n self.client.load_extension(module)\n except Exception as e:\n await self.client.say(f\"{type(e).__name__}: {e}\")\n else:\n await self.client.say(\"Module reloaded.\")\n\n\ndef setup(bot):\n bot.add_cog(Admin(bot))\n", "repo_name": "fisherthewol/Dave-BOT", "sub_path": "DaveBOT/cogs/admin.py", "file_name": "admin.py", "file_ext": "py", "file_size_in_byte": 1320, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "discord.ext.commands.command", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "DaveBOT.checks.adminonly", "line_number": 11, "usage_type": "call"}, {"api_name": "DaveBOT.checks", "line_number": 11, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 21, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 21, "usage_type": "name"}, {"api_name": "DaveBOT.checks.adminonly", "line_number": 22, "usage_type": "call"}, {"api_name": "DaveBOT.checks", "line_number": 22, "usage_type": "name"}, {"api_name": "discord.ext.commands.command", "line_number": 32, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 32, "usage_type": "name"}, {"api_name": "DaveBOT.checks.adminonly", "line_number": 33, "usage_type": "call"}, {"api_name": "DaveBOT.checks", "line_number": 33, "usage_type": "name"}]} +{"seq_id": "18881017879", "text": "from typing import Tuple\n\nimport numpy as np\n\nfrom ...utils import exclude_list_from_list\n\n\nclass Walker(object):\n def __init__(self, coord: Tuple[int, int], main_step_prob: float = 1.0):\n self.start_coord = coord\n self.main_step_prob = main_step_prob\n self.coord = list(coord)\n\n def random_step(self, real_step, possible_actions):\n if len(possible_actions) == 1:\n return real_step\n\n if 
np.random.binomial(n=2, p=self.main_step_prob):\n return real_step\n else:\n return np.random.choice(\n exclude_list_from_list(possible_actions, [real_step])\n )\n\n def get_coord_from_action(self, action, cur_coord):\n x, y = cur_coord\n if action == \"left\":\n y -= 1\n elif action == \"right\":\n y += 1\n elif action == \"up\":\n x -= 1\n elif action == \"down\":\n x += 1\n return x, y\n\n def step(self, direction, possible_actions, do_step=True):\n if direction not in possible_actions:\n raise RuntimeError(\"direction should be in possible_actions\")\n\n direction = self.random_step(direction, possible_actions)\n new_x, new_y = self.get_coord_from_action(direction, tuple(self.coord))\n self.coord[0] = new_x\n self.coord[1] = new_y\n\n def reset_coord(self):\n self.coord = list(self.start_coord)\n", "repo_name": "VSydorskyy/iasa_multiagent", "sub_path": "matk/reinforcment/agents/walker.py", "file_name": "walker.py", "file_ext": "py", "file_size_in_byte": 1413, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "typing.Tuple", "line_number": 9, "usage_type": "name"}, {"api_name": "numpy.random.binomial", "line_number": 18, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 18, "usage_type": "attribute"}, {"api_name": "numpy.random.choice", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 21, "usage_type": "attribute"}, {"api_name": "utils.exclude_list_from_list", "line_number": 22, "usage_type": "call"}]} +{"seq_id": "23097576742", "text": "import pandas as pd\n#from html.parser import HTMLParser\n#from collections import defaultdict\n#import urllib.request\nimport tempfile\nimport pickle\nimport requests\nfrom lxml import etree\nimport shutil\nimport os.path as osp\nfrom pprint import pprint\nimport datetime\nimport re\nimport os\n\nclass Parser(object):\n def __init__(self,queries=('חיפה',)):\n\n #self.debug = True\n self.debug = False\n if type(queries) is str:\n queries = (queries,)\n self.queries = queries\n self.addr_prefix = 'https://www.gov.il/he/departments/news/'\n\n save_dir = './' #\n s_queries = '_'.join(queries)\n self.tar_fname = osp.join(save_dir,'govil_parse_{}_{}'.format(datetime.datetime.now().strftime('%d%m%y_%Hh'),s_queries))\n self.tar_fname_recent = osp.join(save_dir,'govil_parse_{}_{}'.format('recent', s_queries))\n print('target file name: {}'.format(self.tar_fname))\n self.res_data = None\n def get_addresses(self):\n start_date = datetime.datetime(day=1,month=3,year=2020)\n end_date = datetime.datetime.now()\n delta = datetime.timedelta(days=1)\n cur = start_date\n err_msg = 'לא מצאנו את מה שחיפשת.'\n #addresses = []\n html = None\n while cur <= end_date:\n dd = cur.day\n mm = cur.month\n yyyy = cur.year\n for num in range(1,100): # no more than 100 updates per day\n url = '{}{:02d}{:02d}{:04d}_{:02d}'.format(self.addr_prefix, dd,mm,yyyy, num)\n success = False\n seek_next_page = False\n for tries in range(3):\n try:\n print('try {}, checking {}...'.format(tries, url))\n html = requests.get(url).text\n if err_msg in html:\n print('no page')\n seek_next_page = True\n pass\n else:\n success = True\n break\n except Exception as e:\n print(e)\n if success:\n yield html, url\n #addresses.append(url)\n if seek_next_page:\n break\n\n cur += delta\n #return addresses\n\n def parse_details(self,s=None,url='',patient=-1):\n date = re.findall('\\d\\d\\.\\d\\d\\.\\d\\d\\d\\d',s)\n hours = re.findall('\\d\\d:\\d\\d',s)\n return 
dict(date=date,hours=hours,text=s,url=url,patient=patient)\n\n def read_page(self, html=None, addr='https://www.gov.il/he/departments/news/20032020_04'):\n if html is None:\n html = requests.get(addr).text\n debug=True\n else:\n debug=False\n dom = etree.HTML(html)\n text = dom.xpath('//div[@id=\"NewsContent\"]/p/node()')\n dat = []\n cur_patient=-1\n for line in text:\n if type(line) is not etree._Element: # i.e. string with details\n if any([q in line for q in self.queries]):\n di = self.parse_details(line,addr,patient=cur_patient)\n print('patient: {}, date: {}, times: {}, all: {}'.format(di['patient'], di['date'], di['hours'], di['text']))\n\n dat.append(di)\n else: # element\n try:\n cur_patient = re.findall('{} (\\d+)'.format('חולה מספר'),line.text)[0]\n except:\n pass\n # cur_patient = -1\n return dat\n def read_pages(self):\n data = []\n for html, url in self.get_addresses():\n data.extend(self.read_page(html=html, addr=url))\n self.res_data = data\n def save_parsed_data(self):\n data = self.res_data\n df = pd.DataFrame(data)\n df['text'] = df['text'].apply(str.strip)\n df['url'] = df['url'].apply(lambda x: '{}'.format(x,x))\n df = df.iloc[::-1] # reverse\n open(self.tar_fname+'.html','w').writelines(df.to_html(escape=False))\n open(self.tar_fname+'.md', 'w').writelines(df.to_markdown())\n print('written HTML to {}'.format(self.tar_fname+'.html'))\n print('written MD to {}'.format(self.tar_fname+'.md'))\n shutil.copy(self.tar_fname+'.md',self.tar_fname_recent+'.md')\n os.system('xdg-open {}'.format(self.tar_fname+'.html'))\n\ndef someplot():\n import numpy as np\n import matplotlib.pyplot as plt\n x = np.cumsum(np.random.randn(1000))\n print(x)\n plt.plot(x)\n plt.show()\n exit(0)\nif __name__ == '__main__':\n #someplot()\n parser = Parser(queries='חיפה')\n #parser = Parser(queries=['חיפה','קרית ים','קריית ים','אתא','מוצקין','קרית חיים','קריית חיים','ביאליק','הקריות'])\n # parser = Parser(queries='סטוק')\n # parser = Parser(queries='כנסת')\n # parser = Parser(queries=['יקנעם','יוקנעם'])\n # parser.read_page()\n parser.read_pages()\n parser.save_parsed_data()\n", "repo_name": "IdoZach/govil_corona_parse", "sub_path": "govil_corona_parser.py", "file_name": "govil_corona_parser.py", "file_ext": "py", "file_size_in_byte": 5115, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.join", "line_number": 28, "usage_type": "call"}, {"api_name": "os.path", "line_number": 28, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 28, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 28, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 29, "usage_type": "call"}, {"api_name": "os.path", "line_number": 29, "usage_type": "name"}, {"api_name": "datetime.datetime", "line_number": 33, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 34, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 34, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 51, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 71, "usage_type": "call"}, {"api_name": "re.findall", "line_number": 72, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 77, "usage_type": "call"}, {"api_name": "lxml.etree.HTML", "line_number": 81, "usage_type": "call"}, {"api_name": "lxml.etree", "line_number": 81, "usage_type": 
"name"}, {"api_name": "lxml.etree._Element", "line_number": 86, "usage_type": "attribute"}, {"api_name": "lxml.etree", "line_number": 86, "usage_type": "name"}, {"api_name": "re.findall", "line_number": 94, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 106, "usage_type": "call"}, {"api_name": "shutil.copy", "line_number": 114, "usage_type": "call"}, {"api_name": "os.system", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.cumsum", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 120, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 120, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}]} +{"seq_id": "35635217506", "text": "# ami_creation.py\n\nfrom datetime import datetime, timedelta\nimport aws_utils as awsutils\nimport os\nimport boto3\n\nREGION = os.environ['AWS_REGION']\nRETENTION = os.getenv('RETENTION', 1)\n\ndef get_instance_name(fid):\n # When given an instance ID as str e.g. 'i-1234567', return the instance 'Name' from the name tag.\n ec2 = boto3.resource('ec2')\n ec2instance = ec2.Instance(fid)\n instancename = ''\n for tags in ec2instance.tags:\n if tags[\"Key\"] == 'Name':\n instancename = tags[\"Value\"]\n return instancename\n\ndef backup(region_id=REGION):\n '''This function searches for all EC2 instances with a tag of BackUp\n and creates a AMI for them and tags the images with a\n RemoveOn tag of a YYYYMMDD value of days in UTC mentioned in RETENTION variable from today\n '''\n created_on = datetime.utcnow().strftime('%Y%m%d')\n remove_on = (datetime.utcnow() + timedelta(days=RETENTION)).strftime('%Y%m%d')\n session = awsutils.get_session(region_id)\n \n client = session.client('ec2')\n resource = session.resource('ec2')\n \n reservations = client.describe_instances(Filters=[{'Name': 'tag-key', 'Values': ['BackUp']}])\n \n for reservation in reservations['Reservations']:\n for instance_description in reservation['Instances']:\n instance_id = instance_description['InstanceId']\n name_tag = get_instance_name(instance_id)\n name = f\"{name_tag}_InstanceId({instance_id})_CreatedOn({created_on})\"\n print(f\"Creating Backup: {name}\")\n image_description = client.create_image(InstanceId=instance_id, Name=name, NoReboot=True)\n images = []\n images.append(image_description['ImageId'])\n image = resource.Image(image_description['ImageId'])\n image.create_tags(Tags=[{'Key': 'RemoveOn', 'Value': remove_on}, {'Key': 'Name', 'Value': name}])\n\nif __name__ == '__main__':\n backup(REGION)", "repo_name": "pawarrchetan/aws-utilities", "sub_path": "aws-ami-management/ami_creation.py", "file_name": "ami_creation.py", "file_ext": "py", "file_size_in_byte": 1943, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.environ", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "boto3.resource", "line_number": 13, "usage_type": "call"}, {"api_name": "datetime.datetime.utcnow", "line_number": 26, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 26, "usage_type": "name"}, {"api_name": "datetime.datetime.utcnow", "line_number": 
27, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 27, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 27, "usage_type": "call"}, {"api_name": "aws_utils.get_session", "line_number": 28, "usage_type": "call"}]} +{"seq_id": "31489233491", "text": "import os\nimport glob\nimport re\nimport multiprocessing as mp\nimport numpy as np\nimport scipy.io.wavfile\nimport pandas as pd\nimport tqdm\n\nfrom magnolia.utils.bss_eval import bss_eval_sources\n\n\ndef evaluate(input_path, output_csv_file, target_stype=None, eval_sr=8000, num_sources=2):\n print('starting evaluation on directory {}'.format(input_path))\n \n mix_glob_pattern = 'mix_*_snr_*.wav'\n mix_regex = r\"mix_(?P[0-9]+)_snr_(?P[0-9\\-\\.]+).wav\"\n original_source_glob_format = 'mix_{}_original_source_*.wav'\n original_regex = r\"mix_(?P[0-9]+)_original_source_(?P[0-9]+).wav\"\n separated_source_glob_format = 'mix_{}_separated_source_*.wav'\n extended_separated_source_glob_format = 'mix_{}_*_separated_source_*.wav'\n separated_regex = r\"mix_(?P[0-9]+)_separated_source_(?P[0-9]+).wav\"\n extended_separated_regex = r\"mix_(?P[0-9]+)_(?P[a-zA-Z]*)_separated_source_(?P[0-9]+).wav\"\n\n mix_info = {'snr': [],\n 'mix_number': [],\n 'mix_file_location': []}\n for source_num in range(num_sources):\n mix_info['original_source_{}_file_location'.format(source_num + 1)] = []\n mix_info['separated_source_{}_file_location'.format(source_num + 1)] = []\n mix_info['source_{}_original_sdr'.format(source_num + 1)] = []\n mix_info['separated_source_{}_output_sdr'.format(source_num + 1)] = []\n\n mixes_list = glob.glob(os.path.join(input_path, mix_glob_pattern))\n for filename in tqdm.tqdm(mixes_list):\n dirname = os.path.dirname(os.path.normpath(filename))\n basename = os.path.basename(os.path.normpath(filename))\n m = re.match(mix_regex, basename)\n mix_num = int(m.group('mix_number'))\n mix_info['mix_number'].append(mix_num)\n mix_info['snr'].append(float(m.group('snr')))\n mix_info['mix_file_location'].append(filename)\n\n mix_input = []\n mix_y = scipy.io.wavfile.read(filename)[1]\n for i in range(num_sources):\n mix_input.append(mix_y)\n mix_input = np.stack(mix_input)\n\n original_input = []\n original_input_order = []\n original_source_glob = original_source_glob_format.format(mix_num)\n for original_filename in glob.glob(os.path.join(input_path, original_source_glob)):\n original_basename = os.path.basename(os.path.normpath(original_filename))\n m = re.match(original_regex, original_basename)\n source_num = int(m.group('source_number'))\n original_input_order.append(source_num)\n mix_info['original_source_{}_file_location'.format(source_num)].append(original_filename)\n original_y = scipy.io.wavfile.read(original_filename)[1]\n original_input.append(original_y)\n\n original_input = np.stack(original_input)[np.argsort(original_input_order)]\n\n separated_input = []\n separated_input_order = []\n separated_source_glob = separated_source_glob_format.format(mix_num)\n extended_separated_source_glob = extended_separated_source_glob_format.format(mix_num)\n is_extended = False\n gg = None\n if glob.glob(os.path.join(input_path, separated_source_glob)):\n gg = glob.glob(os.path.join(input_path, separated_source_glob))\n elif glob.glob(os.path.join(input_path, extended_separated_source_glob)):\n gg = glob.glob(os.path.join(input_path, extended_separated_source_glob))\n is_extended = True\n for separated_filename in gg:\n separated_basename = os.path.basename(os.path.normpath(separated_filename))\n m = 
None\n if not is_extended:\n m = re.match(separated_regex, separated_basename)\n else:\n m = re.match(extended_separated_regex, separated_basename)\n if m.group('stype') != target_stype:\n continue\n source_num = int(m.group('source_number'))\n separated_input_order.append(source_num)\n mix_info['separated_source_{}_file_location'.format(source_num)].append(separated_filename)\n separated_y = scipy.io.wavfile.read(separated_filename)[1]\n separated_input.append(separated_y)\n\n separated_input = np.stack(separated_input)[np.argsort(separated_input_order)]\n\n starting_sdr, starting_sir, starting_sar, starting_perm = bss_eval_sources(original_input, mix_input)\n final_sdr, final_sir, final_sar, final_perm = bss_eval_sources(original_input, separated_input)\n\n for i in range(num_sources):\n mix_info['source_{}_original_sdr'.format(i + 1)].append(starting_sdr[i])\n mix_info['separated_source_{}_output_sdr'.format(i + 1)].append(final_sdr[final_perm][i])\n\n print('writing output CSV file to {}'.format(output_csv_file))\n pd.DataFrame(mix_info).to_csv(output_csv_file, index=False, index_label='mix_number')\n\n\n\nif __name__ == '__main__':\n args = [\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/lab41/in_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/lab41/in_sample_test.csv'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/lab41/out_of_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/lab41/out_of_sample_test.csv'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/large_lab41/in_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/large_lab41/in_sample_test.csv'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/large_lab41/out_of_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/large_lab41/out_of_sample_test.csv'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/chimera/in_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/chimera/mi_in_sample_test.csv',\n 'mi'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/chimera/out_of_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/chimera/mi_out_of_sample_test.csv',\n 'mi'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/chimera/in_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/chimera/dc_in_sample_test.csv',\n 'dc'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/chimera/out_of_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/chimera/dc_out_of_sample_test.csv',\n 'dc'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/snmf/in_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/snmf/in_sample_test.csv'],\n ['/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/snmf/out_of_sample_test',\n '/local_data/magnolia/experiment_data/date_2017_09_28_time_13_14/aux/evaluations/bss/snmf/out_of_sample_test.csv']\n ]\n \n args = args[8:]\n \n # Parallel\n 
#processes = []\n #for arg in args:\n # processes.append(mp.Process(target=evaluate, args=arg))\n # processes[-1].start()\n # \n #for process in processes:\n # process.join()\n \n # Parallel\n #pool = mp.Pool(processes=min(len(args), os.cpu_count() - 1))\n pool = mp.Pool(processes=2)\n pool.starmap(evaluate, args)\n \n # Sequential\n #for arg in args:\n # evaluate(*arg)\n", "repo_name": "Lab41/Magnolia", "sub_path": "magnolia/python/analysis/bss_evaluate.py", "file_name": "bss_evaluate.py", "file_ext": "py", "file_size_in_byte": 7999, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 45, "dataset": "github-code", "pt": "76", "api": [{"api_name": "glob.glob", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "tqdm.tqdm", "line_number": 35, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path", "line_number": 36, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.basename", "line_number": 37, "usage_type": "call"}, {"api_name": "os.path", "line_number": 37, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 37, "usage_type": "call"}, {"api_name": "re.match", "line_number": 38, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io.wavfile.read", "line_number": 45, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io", "line_number": 45, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile", "line_number": 45, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 48, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 53, "usage_type": "call"}, {"api_name": "os.path", "line_number": 53, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 54, "usage_type": "call"}, {"api_name": "re.match", "line_number": 55, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io.wavfile.read", "line_number": 59, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io", "line_number": 59, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile", "line_number": 59, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 62, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 70, "usage_type": "call"}, {"api_name": "os.path", "line_number": 70, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 71, "usage_type": "call"}, {"api_name": "os.path", "line_number": 71, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 72, "usage_type": "call"}, {"api_name": "os.path", "line_number": 72, "usage_type": "attribute"}, {"api_name": "glob.glob", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 73, "usage_type": "call"}, {"api_name": "os.path", "line_number": 73, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 76, 
"usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.normpath", "line_number": 76, "usage_type": "call"}, {"api_name": "re.match", "line_number": 79, "usage_type": "call"}, {"api_name": "re.match", "line_number": 81, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io.wavfile.read", "line_number": 87, "usage_type": "call"}, {"api_name": "scipy.io.wavfile.io", "line_number": 87, "usage_type": "attribute"}, {"api_name": "scipy.io.wavfile", "line_number": 87, "usage_type": "name"}, {"api_name": "numpy.stack", "line_number": 90, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 90, "usage_type": "call"}, {"api_name": "magnolia.utils.bss_eval.bss_eval_sources", "line_number": 92, "usage_type": "call"}, {"api_name": "magnolia.utils.bss_eval.bss_eval_sources", "line_number": 93, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 100, "usage_type": "call"}, {"api_name": "multiprocessing.Pool", "line_number": 145, "usage_type": "call"}]} +{"seq_id": "3933771806", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 30 12:51:34 2021\r\n\r\n@author: anton\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\r\nfrom sklearn import decomposition, ensemble\r\n\r\ntweet_df = pd.read_csv(r'ready_to_go_1020_220726.csv')\r\n\r\n# transforming data \r\n #train test split\r\n#lda_train_x, lda_test_x, lda_train_y, lda_test_y = model_selection.train_test_split(dense_dfND['tweet'],dense_dfND['vaccine_type'])\r\n\r\n #feature engineering\r\ncv_lda = TfidfVectorizer(stop_words='english',ngram_range=(1,2))\r\n\r\n\r\nlda_text = cv_lda.fit_transform(tweet_df.tweet)\r\n\r\n\r\n\r\n\r\n\r\n#model\r\nlda_model = decomposition.LatentDirichletAllocation(n_components = 4 , verbose=1, max_iter=20)\r\n#mode fit/transform\r\nX_topics = lda_model.fit_transform(lda_text)\r\ntopic_word = lda_model.components_\r\nvocab = cv_lda.get_feature_names()\r\n\r\n\r\nn_top_words = 10\r\ntopic_summaries = []\r\nfor topic_dist in topic_word:\r\n topic_words = np.array(vocab)[np.argsort(topic_dist)][:-(n_top_words+1):-1]\r\n topic_summaries.append(','.join(topic_words))", "repo_name": "AntonG-89/vaccine_se_twitter", "sub_path": "LDA.py", "file_name": "LDA.py", "file_ext": "py", "file_size_in_byte": 1101, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.read_csv", "line_number": 13, "usage_type": "call"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 20, "usage_type": "call"}, {"api_name": "sklearn.decomposition.LatentDirichletAllocation", "line_number": 30, "usage_type": "call"}, {"api_name": "sklearn.decomposition", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 40, "usage_type": "call"}, {"api_name": "numpy.argsort", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "38728098813", "text": "import sys\nfrom dataclasses import asdict\nfrom pathlib import Path\nfrom typing import List\n\nfrom pandas import DataFrame\n\nfrom src.car_racing import CustomRacing\nfrom src.ppo import PPONetwork, perform_ppo_learning, get_ppo_data, PPOMetadata\n\n# Initialize\nweights_path: Path = Path() / \"data\" / \"results\"\nweights_folder: Path = weights_path / sys.argv[1]\nweights_folder.mkdir(parents=True, exist_ok=True)\nmetadata_path: Path = Path() / \"data\" / \"metadata\"\nnetwork = 
PPONetwork(weights_folder)\nassert sys.argv[2] == \"-t\" or sys.argv[2] == \"-i\" or sys.argv[2] == \"-d\"\nepisode: int = int(sys.argv[3])\ncar_racing: CustomRacing = CustomRacing(episode)\n \n# Training Mode\nif sys.argv[2] == \"-t\":\n if episode > 0:\n network.load_model(episode)\n perform_ppo_learning(car_racing, network, True)\n \n# Inference Mode\nelif sys.argv[2] == \"-i\":\n network.load_model(episode)\n perform_ppo_learning(car_racing, network, False)\n \n# Collect Metadata Mode (for plots)\nelif sys.argv[2] == \"-d\":\n metadata: List[PPOMetadata] = get_ppo_data(car_racing, network)\n as_dataframe: DataFrame = DataFrame([asdict(data) for data in metadata])\n as_dataframe.to_csv(metadata_path / \"ppo.csv\")\n\n\n\n", "repo_name": "linuslh1996/car-racing-unimore", "sub_path": "train_ppo.py", "file_name": "train_ppo.py", "file_ext": "py", "file_size_in_byte": 1199, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pathlib.Path", "line_number": 12, "usage_type": "name"}, {"api_name": "pathlib.Path", "line_number": 13, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 13, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 15, "usage_type": "name"}, {"api_name": "src.ppo.PPONetwork", "line_number": 16, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 17, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 18, "usage_type": "attribute"}, {"api_name": "src.car_racing.CustomRacing", "line_number": 19, "usage_type": "name"}, {"api_name": "sys.argv", "line_number": 22, "usage_type": "attribute"}, {"api_name": "src.ppo.perform_ppo_learning", "line_number": 25, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 28, "usage_type": "attribute"}, {"api_name": "src.ppo.perform_ppo_learning", "line_number": 30, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 33, "usage_type": "attribute"}, {"api_name": "typing.List", "line_number": 34, "usage_type": "name"}, {"api_name": "src.ppo.PPOMetadata", "line_number": 34, "usage_type": "name"}, {"api_name": "src.ppo.get_ppo_data", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 35, "usage_type": "name"}, {"api_name": "dataclasses.asdict", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "9504274989", "text": "# Observer notifies about events happening in other objects they observe without coupling to their classes. \n\n# Use case. Each time I need to add the subscription mechanism to let an object subscribe to/ unsubscribe from notifications on the events happening with a specific publisher class, I use the Observer pattern. \n\n# A good example is a simple subscription to news from any online magazine, frequently with the option to choose your sphere of interest (science, digital technology, etc.). Alternatively, the button “Notify me when it’s in stock” for e-commerce platforms is another example.\n\n# ➕ You haven’t to change the publisher’s code to add subscribers’ classes.\n\n# ➖ Subscribers get notifications in random order. 
\n\n\nfrom __future__ import annotations\nfrom abc import ABC, abstractmethod\nfrom random import randrange\nfrom typing import List\n\nclass Subject(ABC):\n\n @abstractmethod\n def attach(self, observer:Observer):\n ...\n\n @abstractmethod\n def detach(self, observer: Observer):\n raise NotImplementedError\n \n @abstractmethod\n def notify(self):\n pass\n\nclass ConcreteSubject(Subject):\n _state :int = None\n _observers: List[Observer] = []\n\n def attach(self, observer: Observer):\n print(\"subject attached an observer\")\n self._observers.append(observer)\n\n def detach(self, observer: Observer):\n self._observers.remove(observer)\n\n def notify(self):\n print(\"Subject notifying observers...\")\n for observer in self._observers:\n observer.update(self)\n\n def some_business_logic(self):\n print(\"subject im doing something important\")\n self._state = randrange(0,10)\n print(f'subject: my state has just changed to {self._state}')\n self.notify()\n\n\n\n\nclass Observer(ABC):\n @abstractmethod\n def update(self, subject: Subject):\n pass\n\nclass ConcreteObserverA(Observer):\n def update(self, subject: Subject):\n if subject._state < 3:\n print(\"concreteObserverA: Reacted to event\")\n\nclass ConcreteObserverB(Observer):\n def update(self, subject: Subject):\n if subject._state == 0 or subject._state >= 2:\n print(\"Concreate observerB : reacted to event\") \n\nif __name__ == \"__main__\":\n subject = ConcreteSubject()\n\n observer_a = ConcreteObserverA()\n subject.attach(observer_a)\n\n observer_b = ConcreteObserverB()\n subject.attach(observer_b)\n\n subject.some_business_logic()\n subject.some_business_logic()\n\n subject.detach(observer_a)\n\n subject.some_business_logic()\n\n\n# $ python observer.py \n# subject attached an observer\n# subject attached an observer\n# subject im doing something important\n# subject: my state has just changed to 7\n# Subject notifying observers...\n# Concreate observerB : reacted to event\n# subject im doing something important\n# subject: my state has just changed to 6\n# Subject notifying observers...\n# Concreate observerB : reacted to event\n# subject im doing something important\n# subject: my state has just changed to 9\n# Subject notifying observers...\n# Concreate observerB : reacted to event\n", "repo_name": "BNSBNS/pyplaygrd", "sub_path": "python_pattern/behavior/observer.py", "file_name": "observer.py", "file_ext": "py", "file_size_in_byte": 3135, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "abc.ABC", "line_number": 17, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 19, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 23, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 27, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 33, "usage_type": "name"}, {"api_name": "random.randrange", "line_number": 49, "usage_type": "call"}, {"api_name": "abc.ABC", "line_number": 56, "usage_type": "name"}, {"api_name": "abc.abstractmethod", "line_number": 57, "usage_type": "name"}]} +{"seq_id": "3557260584", "text": "import json\n\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\n\nfrom myapp.models import Class, Student\n\n\ndef index(request):\n return HttpResponse(\"cy is good man\")\n\n\ndef detail(request, **kwargs):\n return HttpResponse(\"detail {0}, {1}\".format(kwargs['num'], kwargs['num2']))\n\n\ndef 
myapp_class(request, **kwargs):\n # 去模型中取数据\n class_list = Class.objects.all()\n # 将数据传递给模板,,,模板渲染页面 然后 返回给浏览器\n return render(request, 'myapp/class.html', {'Class': class_list})\n\n\ndef student_info(request, **kwargs):\n print(kwargs)\n # user_id = int(kwargs['user_id'])\n user_id = kwargs['user_id']\n\n # user_id = int(user_id)\n one_student_info = Student.objects.get(id=user_id)\n print(one_student_info)\n print(type(one_student_info))\n return render(request, 'myapp/student_info.html', {'student_info': one_student_info})\n\n\ndef myapp_student(request):\n student_list = Student.objects.all()\n return render(request, 'myapp/student.html', {'student': student_list})\n\n\ndef class_student(request, class_id):\n one_class = Student.objects.filter(sclass_id=class_id)\n # one_class_student = one_class.student_set.all()\n return render(request, 'myapp/class_and_student.html', {'student': one_class})\n\n\ndef view_delete_student(request):\n student = Student.driver.all()\n return render(request, 'myapp/view_delete_student.html', {'delete_student': student})\n\n\ndef add_student(request):\n from faker import Faker\n f = Faker(locale='zh_CN')\n name = f.name()\n sex = f.boolean()\n import random\n age = random.randint(18, 26)\n contend = f.sentences()\n all_class_id = Class.objects.all()\n class_id = random.choice(all_class_id)\n # from myapp.models import create_student # 是用的方法 对应第三种写法\n # student = create_student(name, sex, age, contend, class_id) # 对应第三种写法\n # student = Student().create_student(name, sex, age, contend, class_id) # 对应 models 的第二种写法\n # student = Student.create_student(name, sex, age, contend, class_id) # 对应第一种写法\n student = Student.driver.create_student(name, sex, age, contend, class_id) # 对应第四种写法\n student.save()\n return HttpResponse(\"学生 {0}添加成功,关联在{1}\".format(name, class_id))\n\n\ndef student_page(request, page):\n page = int(page)\n limit = 5\n student = Student.objects.all()[(page - 1) * limit: page * limit]\n return render(request, 'myapp/student.html', {'student': student})\n\n\ndef hello_world(request):\n print(type(request))\n print(request)\n print('get:', request.GET)\n\n if request.GET.get('num', False) :\n resp = {'code': 1000, 'detail': 'success! 
hello'}\n else:\n resp = {'code': 2100, 'detail': 'fail'}\n\n return HttpResponse(json.dumps(resp), content_type=\"application/json\")\n\n\ndef attribute(request):\n \"\"\"查看他们的属性\"\"\"\n print(\"path:\", request.path)\n print('method:', request.method)\n print('encoding:', request.encoding)\n print('get:', request.GET)\n print('post:', request.POST)\n print('cookies:', request.COOKIES)\n print('session:', request.session)\n print('files:', request.FILES)\n return HttpResponse('attribute')\n\n\ndef attribute_get1(request):\n \"\"\"查看他们的属性\"\"\"\n a = request.GET['a']\n b = request.GET.get('b')\n c = request.GET['c']\n print(\"path:\", request.path)\n print('method:', request.method)\n print('encoding:', request.encoding)\n print('get:', request.GET)\n print('post:', request.POST)\n print('cookies:', request.COOKIES)\n print('session:', request.session)\n print('files:', request.FILES)\n return HttpResponse(\"a:{a}\\nb:{b}\\nc:{c}\".format(a=a, b=b, c=c))\n\n\ndef attribute_get2(request):\n \"\"\"查看他们的属性\"\"\"\n a = request.GET.getlist('a')\n b = request.GET.getlist('b')\n c = request.GET.getlist('c')\n print(\"path:\", request.path)\n print('method:', request.method)\n print('encoding:', request.encoding)\n print('get:', request.GET)\n print('post:', request.POST)\n print('cookies:', request.COOKIES)\n print('session:', request.session)\n print('files:', request.FILES)\n return HttpResponse(\"a:{a}\\nb:{b}\\nc:{c}\".format(a=a, b=b, c=c))\n\n\ndef show_register(request):\n return render(request, 'myapp/register.html')\n\n\ndef register(request):\n name = request.POST['name']\n age = request.POST['age']\n sex = request.POST['sex']\n bobby = request.POST.getlist('bobby')\n info = {\n 'name': name,\n 'age': age,\n 'sex': sex,\n 'hobby': bobby,\n 'code': 1000\n }\n return HttpResponse(json.dumps(info), content_type='application/json')\n\n\ndef show_response(request):\n result = HttpResponse()\n result.content = b'good'\n print(result.charset)\n print(result.content)\n print(result.status_code)\n return result\n\n\ndef show_cookie(request):\n \"\"\"设置cookie\"\"\"\n res = HttpResponse()\n # cookie = request.COOKIES\n # res.write(\"
{}
\".format(cookie['sid']))\n res.delete_cookie('sid') # 删除 cookie\n # res.set_cookie('sid', 'WSEGSLIF87665DFWS0j')\n return res\n\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import redirect\n\n\n# 重定向\ndef show_redirect1(request):\n \"\"\"url 配置 这个 但是 跳转到下面的一个\"\"\"\n return HttpResponseRedirect('/show_redirect2')\n # return redirect('/show_redirect2')\n\n\ndef show_redirect2(request):\n data = {\n 'code': 1000,\n 'status': 1\n }\n return HttpResponse(json.dumps(data))\n", "repo_name": "SirCYong/LearningTogetherDjango", "sub_path": "myapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 5645, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.http.HttpResponse", "line_number": 12, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 16, "usage_type": "call"}, {"api_name": "myapp.models.Class.objects.all", "line_number": 21, "usage_type": "call"}, {"api_name": "myapp.models.Class.objects", "line_number": 21, "usage_type": "attribute"}, {"api_name": "myapp.models.Class", "line_number": 21, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 23, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects.get", "line_number": 32, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects", "line_number": 32, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 32, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 35, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects.all", "line_number": 39, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects", "line_number": 39, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 39, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 40, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects.filter", "line_number": 44, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects", "line_number": 44, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 44, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 46, "usage_type": "call"}, {"api_name": "myapp.models.Student.driver.all", "line_number": 50, "usage_type": "call"}, {"api_name": "myapp.models.Student.driver", "line_number": 50, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 50, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 51, "usage_type": "call"}, {"api_name": "faker.Faker", "line_number": 56, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 60, "usage_type": "call"}, {"api_name": "myapp.models.Class.objects.all", "line_number": 62, "usage_type": "call"}, {"api_name": "myapp.models.Class.objects", "line_number": 62, "usage_type": "attribute"}, {"api_name": "myapp.models.Class", "line_number": 62, "usage_type": "name"}, {"api_name": "random.choice", "line_number": 63, "usage_type": "call"}, {"api_name": "myapp.models.Student.driver.create_student", "line_number": 68, "usage_type": "call"}, {"api_name": "myapp.models.Student.driver", "line_number": 68, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 68, "usage_type": "name"}, {"api_name": "django.http.HttpResponse", "line_number": 70, "usage_type": "call"}, {"api_name": "myapp.models.Student.objects.all", "line_number": 76, 
"usage_type": "call"}, {"api_name": "myapp.models.Student.objects", "line_number": 76, "usage_type": "attribute"}, {"api_name": "myapp.models.Student", "line_number": 76, "usage_type": "name"}, {"api_name": "django.shortcuts.render", "line_number": 77, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 90, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 90, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 103, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 119, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 135, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 139, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 154, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 154, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 158, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 168, "usage_type": "call"}, {"api_name": "django.http.HttpResponseRedirect", "line_number": 183, "usage_type": "call"}, {"api_name": "django.http.HttpResponse", "line_number": 192, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 192, "usage_type": "call"}]} +{"seq_id": "1632291340", "text": "import csv\nfrom flask import Flask\nfrom flask import abort\n# this is telling it to go to folder flask and import tool Flask\nfrom flask import render_template\napp = Flask(__name__)\n\ndef fetch_csv(csv_path):\n\tcsv_file = open(csv_path, \"rb\")\n\tcsv_obj = csv.DictReader(csv_file)\n\t\t# returns a dictionary (headers)\n\tcsv_list = list(csv_obj)\n\treturn csv_list\n\n\n@app.route(\"/\")\ndef index():\n\ttemplate = \"index.html\"\n\tobject_list = fetch_csv(\"./static/la-riots-deaths.csv\")\n\t\t#the list of data returned by the csv\n\treturn render_template(template, object_list=object_list)\n\n@app.route(\"//\")\ndef detail(row_id):\n\ttemplate = 'detail.html'\n\tobject_list = fetch_csv(\"./static/la-riots-deaths.csv\")\n\tfor row in object_list: \n\t\tif row['id'] == row_id:\n\t\t\treturn render_template(template, object=row)\n\tabort(404)\n\nif __name__ == \"__main__\":\n # Fire up the Flask test server\n app.run(debug=True, use_reloader=True)\n # If this thing is being booted up from the terminal, run this thing.\n # This will give us a barebones web server to work with.\n # The colon is super important!", "repo_name": "amberjrivera/first-news-app", "sub_path": "app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 1083, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "csv.DictReader", "line_number": 10, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 21, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 29, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "41944034096", "text": "import time\n\nfrom rest_framework.authentication import TokenAuthentication\nfrom rest_framework import exceptions\n\n\nclass DelayTokenAuthentication(TokenAuthentication):\n \"\"\"\n Simple token based authentication with configurable delay\n\n Clients should authenticate by passing the token key in the \"Authorization\"\n HTTP header, prepended with the string \"Token \". 
For example:\n\n Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a\n \"\"\"\n HARDCODED_TOKEN = '224a93060c0dd4fb931d05083b4cb7b6a8c27df8'\n DELAY = 0.00001\n\n def authenticate_credentials(self, key):\n if len(key) != len(self.HARDCODED_TOKEN):\n raise exceptions.AuthenticationFailed('Invalid token.')\n\n for i in xrange(len(key)):\n if key[i] == self.HARDCODED_TOKEN[i]:\n time.sleep(self.DELAY)\n else:\n raise exceptions.AuthenticationFailed('Invalid token.')\n", "repo_name": "andresriancho/django-rest-framework-timing", "sub_path": "timing/example/authentication/token.py", "file_name": "token.py", "file_ext": "py", "file_size_in_byte": 929, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "rest_framework.authentication.TokenAuthentication", "line_number": 7, "usage_type": "name"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 21, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 21, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 25, "usage_type": "call"}, {"api_name": "rest_framework.exceptions.AuthenticationFailed", "line_number": 27, "usage_type": "call"}, {"api_name": "rest_framework.exceptions", "line_number": 27, "usage_type": "name"}]} +{"seq_id": "2605345038", "text": "from flask import Flask, jsonify\nfrom flask_restplus import Resource, Api\nfrom apis.fortify.FortifyPushFPR import fortify_push_fpr\n\napp = Flask(__name__)\n\napp.register_blueprint(fortify_push_fpr)\napi = Api(app)\n\n\n@api.route('/hello')\nclass HelloWorld(Resource):\n def get(self):\n return 'foritfy-app from dr-octopus'\n\n\nif __name__ == '__main__':\n app.run(debug = True, host = '0.0.0.0')\n", "repo_name": "cihatyildiz/vm-scripts", "sub_path": "dr-octo/dr-octo/fortify_app.py", "file_name": "fortify_app.py", "file_ext": "py", "file_size_in_byte": 393, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 5, "usage_type": "call"}, {"api_name": "apis.fortify.FortifyPushFPR.fortify_push_fpr", "line_number": 7, "usage_type": "argument"}, {"api_name": "flask_restplus.Api", "line_number": 8, "usage_type": "call"}, {"api_name": "flask_restplus.Resource", "line_number": 12, "usage_type": "name"}]} +{"seq_id": "4466690403", "text": "import json, os\nfrom functools import singledispatch\n\nimport torch\n\nfrom hydragnn.preprocess.load_data import dataset_loading_and_splitting\nfrom hydragnn.preprocess.utils import check_if_graph_size_constant\nfrom hydragnn.utils.distributed import setup_ddp\nfrom hydragnn.utils.model import load_existing_model\nfrom hydragnn.utils.time_utils import print_timers\nfrom hydragnn.utils.config_utils import (\n update_config_NN_outputs,\n normalize_output_config,\n get_log_name_config,\n)\nfrom hydragnn.utils.model import calculate_PNA_degree\nfrom hydragnn.models.create import create_model_config\nfrom hydragnn.train.train_validate_test import test\nfrom hydragnn.postprocess.postprocess import output_denormalize\n\n\n@singledispatch\ndef run_prediction(config):\n raise TypeError(\"Input must be filename string or configuration dictionary.\")\n\n\n@run_prediction.register\ndef _(config_file: str):\n\n config = {}\n with open(config_file, \"r\") as f:\n config = json.load(f)\n\n run_prediction(config)\n\n\n@run_prediction.register\ndef _(config: dict):\n\n try:\n os.environ[\"SERIALIZED_DATA_PATH\"]\n except:\n 
os.environ[\"SERIALIZED_DATA_PATH\"] = os.getcwd()\n\n world_size, world_rank = setup_ddp()\n\n verbosity = config[\"Verbosity\"][\"level\"]\n train_loader, val_loader, test_loader = dataset_loading_and_splitting(config=config)\n\n graph_size_variable = check_if_graph_size_constant(\n train_loader, val_loader, test_loader\n )\n config = update_config_NN_outputs(config, graph_size_variable)\n\n config = normalize_output_config(config)\n\n config[\"NeuralNetwork\"][\"Architecture\"][\"input_dim\"] = len(\n config[\"NeuralNetwork\"][\"Variables_of_interest\"][\"input_node_features\"]\n )\n max_neigh = config[\"NeuralNetwork\"][\"Architecture\"][\"max_neighbours\"]\n if config[\"NeuralNetwork\"][\"Architecture\"][\"model_type\"] == \"PNA\":\n deg = calculate_PNA_degree(train_loader.dataset, max_neigh)\n else:\n deg = None\n model = create_model_config(\n config=config[\"NeuralNetwork\"][\"Architecture\"],\n num_nodes=train_loader.dataset[0].num_nodes,\n max_neighbours=max_neigh,\n pna_deg=deg,\n verbosity=config[\"Verbosity\"][\"level\"],\n )\n\n log_name = get_log_name_config(config)\n load_existing_model(model, log_name)\n\n (\n error,\n error_rmse_task,\n true_values,\n predicted_values,\n ) = test(test_loader, model, config[\"Verbosity\"][\"level\"])\n\n ##output predictions with unit/not normalized\n if config[\"NeuralNetwork\"][\"Variables_of_interest\"][\"denormalize_output\"]:\n true_values, predicted_values = output_denormalize(\n config[\"NeuralNetwork\"][\"Variables_of_interest\"][\"y_minmax\"],\n true_values,\n predicted_values,\n )\n\n return error, error_rmse_task, true_values, predicted_values\n", "repo_name": "jychoi-hpc/HydraGNN", "sub_path": "hydragnn/run_prediction.py", "file_name": "run_prediction.py", "file_ext": "py", "file_size_in_byte": 2856, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "76", "api": [{"api_name": "functools.singledispatch", "line_number": 22, "usage_type": "name"}, {"api_name": "json.load", "line_number": 32, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 41, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 43, "usage_type": "attribute"}, {"api_name": "os.getcwd", "line_number": 43, "usage_type": "call"}, {"api_name": "hydragnn.utils.distributed.setup_ddp", "line_number": 45, "usage_type": "call"}, {"api_name": "hydragnn.preprocess.load_data.dataset_loading_and_splitting", "line_number": 48, "usage_type": "call"}, {"api_name": "hydragnn.preprocess.utils.check_if_graph_size_constant", "line_number": 50, "usage_type": "call"}, {"api_name": "hydragnn.utils.config_utils.update_config_NN_outputs", "line_number": 53, "usage_type": "call"}, {"api_name": "hydragnn.utils.config_utils.normalize_output_config", "line_number": 55, "usage_type": "call"}, {"api_name": "hydragnn.utils.model.calculate_PNA_degree", "line_number": 62, "usage_type": "call"}, {"api_name": "hydragnn.models.create.create_model_config", "line_number": 65, "usage_type": "call"}, {"api_name": "hydragnn.utils.config_utils.get_log_name_config", "line_number": 73, "usage_type": "call"}, {"api_name": "hydragnn.utils.model.load_existing_model", "line_number": 74, "usage_type": "call"}, {"api_name": "hydragnn.train.train_validate_test.test", "line_number": 81, "usage_type": "call"}, {"api_name": "hydragnn.postprocess.postprocess.output_denormalize", "line_number": 85, "usage_type": "call"}]} +{"seq_id": "33692682293", "text": "import sqlite3\n\n\ndef one_animal(itemid):\n with sqlite3.connect('animal.db') as 
\n    with sqlite3.connect('animal.db') as connection:\n        cursor = connection.cursor()\n        query = \"\"\"\n        SELECT new_animals_table.name_id, new_animals_table.name, animal_type.animal_type, breed.breed,\n        colors.colors, new_animals_table.date_of_birth, outcome_type.outcome_type, outcome_month.outcome_month,\n        outcome_year.outcome_year\n        FROM new_animals_table\n        JOIN animal_color ON animal_color.animals_id = new_animals_table.name_id\n        JOIN colors ON colors.colors_id = animal_color.color_id\n        INNER JOIN animal_type ON new_animals_table.animal_type_id = animal_type.animal_type_id\n        INNER JOIN breed ON new_animals_table.breed_id = breed.breed_id\n        INNER JOIN outcome_type ON new_animals_table.outcome_type_id = outcome_type.outcome_type_id\n        INNER JOIN outcome_month ON new_animals_table.outcome_month_id = outcome_month.outcome_month_id\n        INNER JOIN outcome_year ON new_animals_table.outcome_year_id = outcome_year.outcome_year_id\n        WHERE new_animals_table.name_id = ?\n        \"\"\"\n        cursor.execute(query, (itemid,))\n        results = cursor.fetchall()\n\n    if not results:\n        # no such animal - avoid the IndexError the unguarded lookups below would raise\n        return None\n\n    if len(results) == 1:\n        animal = {'name': results[0][1],\n                  'animal_type': results[0][2],\n                  'breed': results[0][3],\n                  'color1': results[0][4],\n                  'date_of_birth': results[0][5],\n                  'outcome_type': results[0][6],\n                  'outcome_mounth': results[0][7],\n                  'outcome_year': results[0][8]\n                  }\n        return animal\n    else:\n        animal = {'name': results[0][1],\n                  'animal_type': results[0][2],\n                  'breed': results[0][3],\n                  'color1': results[0][4],\n                  'color2': results[1][4],\n                  'date_of_birth': results[0][5],\n                  'outcome_type': results[0][6],\n                  'outcome_mounth': results[0][7],\n                  'outcome_year': results[0][8]\n                  }\n        return animal\n\n\n\n", "repo_name": "K-Maxim/hw15", "sub_path": "utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 2157, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sqlite3.connect", "line_number": 5, "usage_type": "call"}]} +{"seq_id": "2296056844", "text": "from http.server import HTTPServer\nfrom webbrowser import open_new\nfrom http_server_handler import HTTPServerHandler\n\n\nclass TokenHandler:\n    \"\"\"\n    Class used to handle oAuth\n    \"\"\"\n\n    def __init__(self, client_id, client_secret, access_url, api, name_api):\n        self._id = client_id\n        self._secret = client_secret\n        self._access_url = access_url\n        self._api = api\n        self.name_api = name_api\n\n    def get_access_token(self):\n        open_new(self._access_url)\n\n        http_server = HTTPServer(('localhost', 8080),\n                                 lambda request, address, server:\n                                 HTTPServerHandler\n                                 (request, address, server,\n                                  self._id, self._secret,\n                                  self._api, self.name_api))\n\n        # This function will block until it receives a request\n        http_server.socket.settimeout(40)\n        http_server.handle_request()\n        try:\n            return http_server.access_token\n        except AttributeError:\n            return None, None, 'Connection error'\n", "repo_name": "vakyym07/export_contacts", "sub_path": "token_handler.py", "file_name": "token_handler.py", "file_ext": "py", "file_size_in_byte": 1123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "webbrowser.open_new", "line_number": 19, "usage_type": "call"}, {"api_name": "http.server.HTTPServer", "line_number": 21, "usage_type": "call"}, {"api_name": "http_server_handler.HTTPServerHandler", "line_number": 23, "usage_type": "call"}]} +{"seq_id": "75120198324", "text": "\"\"\"\nThanks to A_n_g_e_l_a for the cookies!\n\nITV\nAuthor: stabbedbybrick\n\nInfo:\nITV L3 is 
720p, AAC 2.0 max\n\n\"\"\"\nfrom __future__ import annotations\n\nimport base64\nimport subprocess\nimport json\nimport shutil\nimport sys\n\nfrom collections import Counter\nfrom pathlib import Path\n\nimport click\nimport requests\nimport yaml\n\nfrom bs4 import BeautifulSoup\n\nfrom utils.utilities import (\n info,\n error,\n is_url,\n string_cleaning,\n set_save_path,\n set_filename,\n add_subtitles,\n construct_pssh,\n get_wvd,\n geo_error,\n premium_error,\n)\nfrom utils.titles import Episode, Series, Movie, Movies\nfrom utils.options import get_downloads\nfrom utils.args import get_args\nfrom utils.info import print_info\nfrom utils.config import Config\nfrom utils.cdm import LocalCDM\n\n\nclass ITV(Config):\n def __init__(self, config, srvc_api, srvc_config, **kwargs):\n super().__init__(config, srvc_api, srvc_config, **kwargs)\n\n with open(self.srvc_api, \"r\") as f:\n self.config.update(yaml.safe_load(f))\n\n self.get_options()\n\n def get_license(self, challenge: bytes, lic_url: str) -> bytes:\n r = self.client.post(url=lic_url, data=challenge)\n if not r.is_success:\n error(f\"License request failed: {r.status_code}\")\n exit(1)\n return r.content\n\n def get_keys(self, pssh: str, lic_url: str) -> bytes:\n wvd = get_wvd(Path.cwd())\n with self.console.status(\"Getting decryption keys...\"):\n widevine = LocalCDM(wvd)\n challenge = widevine.challenge(pssh)\n response = self.get_license(challenge, lic_url)\n return widevine.parse(response)\n\n def get_data(self, url: str) -> dict:\n soup = BeautifulSoup(self.client.get(url), \"html.parser\")\n props = soup.select_one(\"#__NEXT_DATA__\").text\n data = json.loads(props)\n return data[\"props\"][\"pageProps\"]\n\n def get_series(self, url: str) -> Series:\n data = self.get_data(url)\n\n return Series(\n [\n Episode(\n id_=None,\n service=\"ITV\",\n title=data[\"programme\"][\"title\"],\n season=episode.get(\"series\") or 0,\n number=episode.get(\"episode\") or 0,\n name=episode[\"episodeTitle\"],\n year=None,\n data=episode[\"playlistUrl\"],\n description=episode.get(\"description\"),\n )\n for series in data[\"seriesList\"]\n if \"Latest episodes\" not in series[\"seriesLabel\"]\n for episode in series[\"titles\"]\n ]\n )\n\n def get_movies(self, url: str) -> Movies:\n data = self.get_data(url)\n\n return Movies(\n [\n Movie(\n id_=None,\n service=\"ITV\",\n title=data[\"programme\"][\"title\"],\n year=movie.get(\"productionYear\"),\n name=data[\"programme\"][\"title\"],\n data=movie[\"playlistUrl\"],\n synopsis=movie.get(\"description\"),\n )\n for movies in data[\"seriesList\"]\n for movie in movies[\"titles\"]\n ]\n )\n\n def get_playlist(self, playlist: str) -> tuple:\n featureset = {\n k: (\"mpeg-dash\", \"widevine\", \"outband-webvtt\", \"hd\", \"single-track\")\n for k in (\"min\", \"max\")\n }\n payload = {\n \"client\": {\"id\": \"browser\"},\n \"variantAvailability\": {\"featureset\": featureset, \"platformTag\": \"dotcom\"},\n }\n\n r = self.client.post(playlist, json=payload)\n if not r.is_success:\n premium_error(\n r.status_code\n ) if \"UserTokenValidationFailed\" in r.text else geo_error(\n r.status_code, None, location=\"UK\"\n )\n\n data = r.json()\n\n video = data[\"Playlist\"][\"Video\"]\n media = video[\"MediaFiles\"]\n mpd_url = f\"{video.get('Base')}{media[0].get('Href')}\"\n lic_url = f\"{media[0].get('KeyServiceUrl')}\"\n subtitle = video.get(\"Subtitles\")\n subtitle = f\"{subtitle[0].get('Href')}\" if subtitle else None\n\n return mpd_url, lic_url, subtitle\n\n\n def get_mediainfo(self, manifest: str, 
quality: str, subtitle: str) -> str:\n r = requests.get(manifest)\n if not r.ok:\n click.echo(f\"\\n\\nError! {r.status_code}\\n{r.content}\")\n sys.exit(1)\n\n self.soup = BeautifulSoup(r.content, \"xml\")\n elements = self.soup.find_all(\"Representation\")\n heights = sorted(\n [int(x.attrs[\"height\"]) for x in elements if x.attrs.get(\"height\")],\n reverse=True,\n )\n\n new_base, params = manifest.split(\".mpd\")\n new_base += \"dash/\"\n self.soup.select_one(\"BaseURL\").string = new_base\n\n segments = self.soup.find_all(\"SegmentTemplate\")\n for segment in segments:\n segment[\"media\"] += params\n segment[\"initialization\"] += params\n\n if subtitle is not None:\n self.soup = add_subtitles(self.soup, subtitle)\n\n with open(self.tmp / \"manifest.mpd\", \"w\") as f:\n f.write(str(self.soup.prettify()))\n\n if quality is not None:\n if int(quality) in heights:\n return quality\n else:\n closest_match = min(heights, key=lambda x: abs(int(x) - int(quality)))\n return closest_match\n\n return heights[0]\n\n def get_content(self, url: str) -> object:\n if self.movie:\n with self.console.status(\"Fetching titles...\"):\n content = self.get_movies(self.url)\n title = string_cleaning(str(content))\n\n info(f\"{str(content)}\\n\")\n\n else:\n with self.console.status(\"Fetching titles...\"):\n content = self.get_series(url)\n\n title = string_cleaning(str(content))\n seasons = Counter(x.season for x in content)\n num_seasons = len(seasons)\n num_episodes = sum(seasons.values())\n\n info(\n f\"{str(content)}: {num_seasons} Season(s), {num_episodes} Episode(s)\\n\"\n )\n\n return content, title\n\n def get_episode_from_url(self, url: str):\n data = self.get_data(url)\n\n episode = Series(\n [\n Episode(\n id_=None,\n service=\"ITV\",\n title=data[\"programme\"][\"title\"],\n season=data[\"episode\"].get(\"series\") or 0,\n number=data[\"episode\"].get(\"episode\") or 0,\n name=data[\"episode\"][\"episodeTitle\"],\n year=None,\n data=data[\"episode\"][\"playlistUrl\"],\n description=data[\"episode\"].get(\"description\"),\n )\n ]\n )\n\n title = string_cleaning(str(episode))\n\n return [episode[0]], title\n\n def get_options(self) -> None:\n downloads, title = get_downloads(self)\n\n for download in downloads:\n self.download(download, title)\n\n def download(self, stream: object, title: str) -> None:\n with self.console.status(\"Getting media info...\"):\n manifest, lic_url, subtitle = self.get_playlist(stream.data)\n self.res = self.get_mediainfo(manifest, self.quality, subtitle)\n pssh = construct_pssh(self.soup)\n\n keys = self.get_keys(pssh, lic_url)\n with open(self.tmp / \"keys.txt\", \"w\") as file:\n file.write(\"\\n\".join(keys))\n\n if self.info:\n print_info(self, stream, keys)\n\n self.filename = set_filename(self, stream, self.res, audio=\"AAC2.0\")\n self.save_path = set_save_path(stream, self, title)\n self.manifest = self.tmp / \"manifest.mpd\"\n self.key_file = self.tmp / \"keys.txt\"\n self.sub_path = None\n\n info(f\"{str(stream)}\")\n for key in keys:\n info(f\"{key}\")\n click.echo(\"\")\n\n args, file_path = get_args(self)\n\n if not file_path.exists():\n try:\n subprocess.run(args, check=True)\n except Exception as e:\n raise ValueError(f\"{e}\")\n else:\n info(f\"{self.filename} already exist. 
Skipping download\\n\")\n self.sub_path.unlink() if self.sub_path else None\n pass\n", "repo_name": "stabbedbybrick/freevine", "sub_path": "services/itv/itv.py", "file_name": "itv.py", "file_ext": "py", "file_size_in_byte": 8492, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 47, "dataset": "github-code", "pt": "76", "api": [{"api_name": "utils.config.Config", "line_number": 49, "usage_type": "name"}, {"api_name": "yaml.safe_load", "line_number": 54, "usage_type": "call"}, {"api_name": "utils.utilities.error", "line_number": 61, "usage_type": "call"}, {"api_name": "utils.utilities.get_wvd", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path.cwd", "line_number": 66, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 66, "usage_type": "name"}, {"api_name": "utils.cdm.LocalCDM", "line_number": 68, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 76, "usage_type": "call"}, {"api_name": "utils.titles.Series", "line_number": 82, "usage_type": "call"}, {"api_name": "utils.titles.Episode", "line_number": 84, "usage_type": "call"}, {"api_name": "utils.titles.Series", "line_number": 79, "usage_type": "name"}, {"api_name": "utils.titles.Movies", "line_number": 104, "usage_type": "call"}, {"api_name": "utils.titles.Movie", "line_number": 106, "usage_type": "call"}, {"api_name": "utils.titles.Movies", "line_number": 101, "usage_type": "name"}, {"api_name": "utils.utilities.premium_error", "line_number": 132, "usage_type": "call"}, {"api_name": "utils.utilities.geo_error", "line_number": 134, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 151, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 153, "usage_type": "call"}, {"api_name": "sys.exit", "line_number": 154, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 156, "usage_type": "call"}, {"api_name": "utils.utilities.add_subtitles", "line_number": 173, "usage_type": "call"}, {"api_name": "utils.utilities.string_cleaning", "line_number": 191, "usage_type": "call"}, {"api_name": "utils.utilities.info", "line_number": 193, "usage_type": "call"}, {"api_name": "utils.utilities.string_cleaning", "line_number": 199, "usage_type": "call"}, {"api_name": "collections.Counter", "line_number": 200, "usage_type": "call"}, {"api_name": "utils.utilities.info", "line_number": 204, "usage_type": "call"}, {"api_name": "utils.titles.Series", "line_number": 213, "usage_type": "call"}, {"api_name": "utils.titles.Episode", "line_number": 215, "usage_type": "call"}, {"api_name": "utils.utilities.string_cleaning", "line_number": 229, "usage_type": "call"}, {"api_name": "utils.options.get_downloads", "line_number": 234, "usage_type": "call"}, {"api_name": "utils.utilities.construct_pssh", "line_number": 243, "usage_type": "call"}, {"api_name": "utils.info.print_info", "line_number": 250, "usage_type": "call"}, {"api_name": "utils.utilities.set_filename", "line_number": 252, "usage_type": "call"}, {"api_name": "utils.utilities.set_save_path", "line_number": 253, "usage_type": "call"}, {"api_name": "utils.utilities.info", "line_number": 258, "usage_type": "call"}, {"api_name": "utils.utilities.info", "line_number": 260, "usage_type": "call"}, {"api_name": "click.echo", "line_number": 261, "usage_type": "call"}, {"api_name": "utils.args.get_args", "line_number": 263, "usage_type": "call"}, {"api_name": "subprocess.run", "line_number": 267, "usage_type": "call"}, {"api_name": 
"utils.utilities.info", "line_number": 271, "usage_type": "call"}]} +{"seq_id": "3188814111", "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport curses\nimport sys\nimport time\n\nimport options\nfrom life import Life\n\n\ndef main(args):\n curses.wrapper(CLI(options.parse(args)).run)\n\n\nclass CLI:\n outputs = {0: \"∙\", 1: \"█\"}\n\n def __init__(self, options):\n self.options = options\n\n def run(self, stdscr):\n CLIRunner.load(self.options, stdscr).run()\n\n\nclass CLIRunner:\n @staticmethod\n def load(app_options, stdscr):\n if app_options.input_file:\n life = Life.from_file(app_options.input_file)\n else:\n height, width = stdscr.getmaxyx()\n width = width\n height = height\n life = Life.random(height - 1, width - 1)\n return CLIRunner(app_options.style, stdscr, life)\n\n def __init__(self, style, stdscr, life):\n self.style = style\n self.stdscr = stdscr\n self.life = life\n self.width = life.width\n self.height = life.height\n self.marker = (int(self.width / 2), int(self.height / 2))\n self.show_marker = False\n\n def run(self):\n curses.noecho()\n curses.cbreak()\n curses.start_color()\n curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)\n curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_WHITE)\n\n mode = self.live\n try:\n while True:\n next_mode = mode()\n if next_mode:\n mode = next_mode\n except (Quit, KeyboardInterrupt):\n pass\n\n def live(self):\n self.stdscr.nodelay(True)\n self.show_marker = False\n\n self.display()\n self.life = self.life.step(self.style)\n time.sleep(0.1)\n\n if self.read() == \" \":\n return self.pause\n\n def pause(self):\n self.stdscr.nodelay(False)\n\n self.show_marker = True\n self.display()\n\n ch = self.read()\n if ch == \" \":\n return self.live\n elif ch == \"KEY_UP\":\n self.marker = (\n self.marker[0] % self.width,\n (self.marker[1] - 1) % self.height,\n )\n elif ch == \"KEY_DOWN\":\n self.marker = (\n self.marker[0] % self.width,\n (self.marker[1] + 1) % self.height,\n )\n elif ch == \"KEY_LEFT\":\n self.marker = (\n (self.marker[0] - 1) % self.width,\n self.marker[1] % self.height,\n )\n elif ch == \"KEY_RIGHT\":\n self.marker = (\n (self.marker[0] + 1) % self.width,\n self.marker[1] % self.height,\n )\n\n def read(self):\n try:\n ch = self.stdscr.getkey()\n except curses.error:\n return\n\n if ch == \"q\":\n raise Quit()\n else:\n return ch\n\n def display(self):\n self.stdscr.clear()\n for i, line in enumerate(self.life.matrix.tolist()):\n self.stdscr.addstr(\n i, 0, \"\".join(CLI.outputs[n] for n in line), curses.color_pair(1)\n )\n if self.show_marker:\n self.stdscr.addstr(\n self.marker[1], self.marker[0], \" \", curses.color_pair(2)\n )\n self.stdscr.move(self.height - 1, 0)\n self.stdscr.refresh()\n\n\nclass Quit(Exception):\n pass\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n", "repo_name": "SamirTalwar/predestination", "sub_path": "src/cli.py", "file_name": "cli.py", "file_ext": "py", "file_size_in_byte": 3390, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "76", "api": [{"api_name": "curses.wrapper", "line_number": 13, "usage_type": "call"}, {"api_name": "options.parse", "line_number": 13, "usage_type": "call"}, {"api_name": "life.Life.from_file", "line_number": 30, "usage_type": "call"}, {"api_name": "life.Life", "line_number": 30, "usage_type": "name"}, {"api_name": "life.Life.random", "line_number": 35, "usage_type": "call"}, {"api_name": "life.Life", "line_number": 35, "usage_type": "name"}, {"api_name": "life.width", 
"line_number": 42, "usage_type": "attribute"}, {"api_name": "life.height", "line_number": 43, "usage_type": "attribute"}, {"api_name": "curses.noecho", "line_number": 48, "usage_type": "call"}, {"api_name": "curses.cbreak", "line_number": 49, "usage_type": "call"}, {"api_name": "curses.start_color", "line_number": 50, "usage_type": "call"}, {"api_name": "curses.init_pair", "line_number": 51, "usage_type": "call"}, {"api_name": "curses.COLOR_GREEN", "line_number": 51, "usage_type": "attribute"}, {"api_name": "curses.COLOR_BLACK", "line_number": 51, "usage_type": "attribute"}, {"api_name": "curses.init_pair", "line_number": 52, "usage_type": "call"}, {"api_name": "curses.COLOR_WHITE", "line_number": 52, "usage_type": "attribute"}, {"api_name": "time.sleep", "line_number": 69, "usage_type": "call"}, {"api_name": "curses.error", "line_number": 107, "usage_type": "attribute"}, {"api_name": "curses.color_pair", "line_number": 119, "usage_type": "call"}, {"api_name": "curses.color_pair", "line_number": 123, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 134, "usage_type": "attribute"}]} +{"seq_id": "72355753524", "text": "import sys\nimport math\nfrom pymavlink import mavutil\nfrom time import time, sleep\nfrom common import print_usage, init_mavlink, print_all_messages\nimport threading\n\n\ndef send_heartbeat(the_connection):\n base_mode = mavutil.mavlink.MAV_MODE_FLAG_MANUAL_INPUT_ENABLED + \\\n mavutil.mavlink.MAV_MODE_FLAG_STABILIZE_ENABLED + \\\n mavutil.mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED\n custom_mode = 0\n system_status = mavutil.mavlink.MAV_STATE_ACTIVE if drone['active'] \\\n else mavutil.mavlink.MAV_STATE_STANDBY\n\n the_connection.mav.heartbeat_send(\n type=mavutil.mavlink.MAV_TYPE_QUADROTOR,\n autopilot=mavutil.mavlink.MAV_AUTOPILOT_ARDUPILOTMEGA,\n base_mode=base_mode,\n custom_mode=custom_mode,\n system_status=system_status)\n\n\ndef send_data_stream_position(the_connection):\n the_connection.mav.global_position_int_send(\n time_boot_ms=0,\n lat=int(drone['lat'] * 10**7), # Converts degrees into degreesE7\n lon=int(drone['lon'] * 10**7), # Converts degrees into degreesE7\n alt=int(drone['alt'] * 1000), # Converts m into mm\n relative_alt=int(drone['alt'] * 1000), # Converts m into mm\n vx=drone['vx'],\n vy=drone['vy'],\n vz=drone['vz'],\n hdg=65535)\n\n\ndef heartbeat_thread(connection, interval):\n while True:\n send_heartbeat(connection)\n sleep(interval)\n\n\ndef global_position_int_thread(connection):\n while True:\n send_data_stream_position(connection)\n sleep(1 / drone['request_data_stream_position']['req_message_rate'])\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2 and len(sys.argv) != 3:\n print_usage(\"\")\n\n connection_string = sys.argv[1]\n baud = sys.argv[2] if len(sys.argv) == 3 else None\n drone_system = 1 # MAVLINK system id for the drone. Id 1 is the same as the drone.\n drone_component = 1 # MAVLINK component id for the drone. Id 1 is the autopilot.\n the_connection_drone = init_mavlink(drone_system, drone_component, connection_string, baud)\n\n gcs_system = 255 # MAVLINK system id for the GCS. Id 255 is the usual for GCSs.\n gcs_component = 190 # MAVLINK component id for the GCS. Id 190 is used for the GCS.\n the_connection_gcs = init_mavlink(gcs_system, gcs_component, 'udpout:localhost:12345', baud)\n\n module_system = 1 # MAVLINK system id for the module. Id 1 is the same as the drone.\n module_component = 99 # MAVLINK component id for the module. 
Id 99 is for private user defined use.\n\n # Defining drone information\n drone = {\n 'lat': 63.1, # degrees\n 'lon': 12.0, # degrees\n 'alt': 50, # m\n 'vx': 20, # cm/s\n 'vy': 95, # cm/s\n 'vz': 0, # cm/s\n 'active': False,\n 'request_data_stream_position': {\n 'request_system': None, # What system to send gps information.\n 'request_component': None, # What component to send gps information.\n 'req_message_rate': None # How often to send gps information in hertz.\n }\n }\n\n last_sheep_rtt_seq = -1\n test_timeout = 5 # 5 sec timeout\n\n # Testing sending heartbeat\n print('\\n\\n1. ##############################################################################')\n print('Sending heartbeat to module.')\n send_heartbeat(the_connection_drone)\n print('Waiting for heartbeat from module...')\n msg = the_connection_drone.recv_match(type='HEARTBEAT', blocking=True, timeout=test_timeout)\n if msg is None:\n print('NOT OK! No heartbeat received from module.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n print('OK! Received heartbeat from module.')\n\n # Starting heartbeat thread\n print('\\n\\n!. ##############################################################################')\n print('Starting heartbeat thread. Interval: 1s')\n t_heartbeat = threading.Thread(target=heartbeat_thread, args=(the_connection_drone, 1.0), daemon=True)\n t_heartbeat.start()\n\n print('\\n\\n!. ##############################################################################')\n print('Changing drone MAV_STATE to MAV_STATE_ACTIVE.')\n drone['active'] = True\n\n # Checking if module requests 'DATA_STREAM_POSITION'\n print('\\n\\n2. ##############################################################################')\n print(\"Checking if module requests 'DATA_STREAM_POSITION' with 'MAV_DATA_STREAM_POSITION\")\n\n msg = the_connection_drone.recv_match(type='REQUEST_DATA_STREAM', blocking=True, timeout=test_timeout)\n print(msg)\n if msg is None:\n print('NOT OK! No \\'REQUEST_DATA_STREAM\\' with \\'MAV_DATA_STREAM_POSITION\\'received from module.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n elif msg.req_stream_id == mavutil.mavlink.MAV_DATA_STREAM_POSITION and \\\n msg.target_system == drone_system and \\\n msg.target_component in (drone_component, 0) and \\\n msg.start_stop == 1:\n print('OK! Received correct \\'REQUEST_DATA_STREAM\\' from module.')\n\n drone['request_data_stream_position'] = {\n 'request_system': the_connection_drone.target_system, # What system to send gps information.\n 'request_component': the_connection_drone.target_component, # What component to send gps information.\n 'req_message_rate': msg.req_message_rate # How often to send gps information in hertz.\n }\n else:\n print('NOT OK! Received \\'REQUEST_DATA_STREAM\\' is not correct. Received:')\n print(msg)\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n\n # Starting global_position_int thread.\n print('\\n\\n!. ##############################################################################')\n print('Starting global_position_int thread. 
Target system: {}, Target component: {}, interval: {}'.format(\n        drone['request_data_stream_position']['request_system'],\n        drone['request_data_stream_position']['request_component'],\n        drone['request_data_stream_position']['req_message_rate']))\n    t_global_position_int = threading.Thread(target=global_position_int_thread, args=(the_connection_drone,), daemon=True)\n    t_global_position_int.start()\n\n    # Checking if module sends 'SHEEP_RTT_DATA'.\n    print('\\n\\n3. ##############################################################################')\n    print(\"Checking if module sends 'SHEEP_RTT_DATA' or encapsulated 'SHEEP_RTT_DATA'.\")\n\n    msg = the_connection_drone.recv_match(type=['SHEEP_RTT_DATA', 'DATA64'], blocking=True, timeout=test_timeout*5)\n    if msg is None:\n        print('NOT OK! No SHEEP_RTT_DATA or encapsulated SHEEP_RTT_DATA received.')\n        the_connection_drone.close()\n        the_connection_gcs.close()\n        exit()\n    elif msg.name == 'SHEEP_RTT_DATA':\n        last_sheep_rtt_seq = msg.seq\n        print('OK! SHEEP_RTT_DATA received.')\n\n        # Send the sheepRTT ack packet directly.\n        msg_ack = the_connection_gcs.mav.sheep_rtt_ack_encode(msg.seq)\n        the_connection_drone.mav.send(msg_ack)\n        print('Sending SHEEP_RTT_ACK to module.')\n    elif msg.name == 'DATA64' and msg.type == 129:\n        msg = the_connection_gcs.mav.parse_char(msg.data[0:msg.len]) # Unpack encapsulated sheepRTT data.\n        last_sheep_rtt_seq = msg.seq\n        print(msg)\n\n        print('OK! Encapsulated SHEEP_RTT_DATA received.')\n\n        # Pack sheepRTT ack packet inside a data16 packet and send it. With zero padding.\n        sheep_rtt_ack_packet = the_connection_gcs.mav.sheep_rtt_ack_encode(msg.seq).pack(the_connection_gcs.mav) + b'\\x00\\x00\\x00'\n\n        msg_ack = the_connection_gcs.mav.data16_encode(130, len(sheep_rtt_ack_packet) - 3, sheep_rtt_ack_packet)\n        the_connection_drone.mav.send(msg_ack)\n        print('Sending encapsulated SHEEP_RTT_ACK to module.')\n\n    # Avoid errors from an earlier SHEEP_RTT_DATA\n    for i in range(10):\n        msg = the_connection_drone.recv_match(type=['SHEEP_RTT_DATA', 'DATA64'], blocking=True, timeout=0.2)\n\n    # Checking if module increments 'SHEEP_RTT_DATA' seq after ack.\n    print('\\n\\n4. ##############################################################################')\n    print(\"Checking if module increments 'SHEEP_RTT_DATA' seq after ack.\")\n\n    msg = the_connection_drone.recv_match(type=['SHEEP_RTT_DATA', 'DATA64'], blocking=True, timeout=test_timeout*5)\n    if msg is None:\n        print('NOT OK! No encapsulated SHEEP_RTT_DATA received.')\n        the_connection_drone.close()\n        the_connection_gcs.close()\n        exit()\n    elif msg.name == 'SHEEP_RTT_DATA':\n        print('SHEEP_RTT_DATA received.')\n    elif msg.name == 'DATA64' and msg.type == 129:\n        msg = the_connection_gcs.mav.parse_char(msg.data[0:msg.len]) # Unpack encapsulated sheepRTT data.\n        print('Encapsulated SHEEP_RTT_DATA received.')\n\n    if msg.seq == last_sheep_rtt_seq + 1:\n        print('OK! SHEEP_RTT_DATA seq incremented.')\n    else:\n        print('NOT OK! SHEEP_RTT_DATA seq not incremented. Expected: {}, received: {}'.format(last_sheep_rtt_seq + 1, msg.seq))\n        the_connection_drone.close()\n        the_connection_gcs.close()\n        exit()\n\n    param_count = None\n\n    # Getting module parameter by id.\n    print('\\n\\n5. 
##############################################################################')\n print(\"Getting module parameter by id\")\n # Start parameter related testing\n for t in range(3):\n the_connection_drone.mav.param_request_read_send(1, 99, str.encode('1 vector weight'), -1) # Test get parameter by id\n msg = the_connection_drone.recv_match(type='PARAM_VALUE', blocking=True, timeout=test_timeout)\n if msg is not None:\n break\n\n if msg is None:\n print('NOT OK! PARAM_VALUE not received.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n elif msg.param_id != '1 vector weight' or \\\n msg.param_value != 0.0 or \\\n msg.param_type != mavutil.mavlink.MAV_PARAM_TYPE_INT32 or \\\n msg.param_index != 1:\n print('NOT OK! PARAM_VALUE contains wrong values. Expected:\\n '\n 'PARAM_VALUE {param_id : 1 vector weight, param_value : 0.0, param_type : 6, param_count : ??, param_index : 1}\\n'\n 'Received:\\n', msg)\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n print('OK! Correct PARAM_VALUE received.')\n param_count = msg.param_count\n\n # Getting module parameter by index.\n print('\\n\\n6. ##############################################################################')\n print(\"Getting module parameter by index\")\n\n for t in range(3):\n the_connection_drone.mav.param_request_read_send(1, 99, str.encode(''), 1) # Test get parameter by index\n msg = the_connection_drone.recv_match(type='PARAM_VALUE', blocking=True, timeout=test_timeout)\n if msg is not None:\n break\n\n if msg is None:\n print('NOT OK! PARAM_VALUE not received.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n elif msg.param_id != '1 vector weight' or \\\n msg.param_value != 0.0 or \\\n msg.param_type != mavutil.mavlink.MAV_PARAM_TYPE_INT32 or \\\n msg.param_count != param_count or \\\n msg.param_index != 1:\n print('NOT OK! PARAM_VALUE contains wrong values. Expected:\\n '\n 'PARAM_VALUE {param_id : 1 vector weight, param_value : 0.0, param_type : 6, param_count : ', param_count, ', param_index : 1}\\n'\n 'Received:\\n', msg)\n\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n print('OK! Correct PARAM_VALUE received.')\n\n # Getting all module parameters.\n print('\\n\\n7. ##############################################################################')\n print(\"Getting all module parameters.\")\n\n for t in range(3):\n the_connection_drone.mav.param_request_list_send(1, 99) # Test get all parameter\n msg = the_connection_drone.recv_match(type='PARAM_VALUE', blocking=True, timeout=test_timeout)\n if msg is not None:\n break\n\n if msg is None:\n print('NOT OK! PARAM_VALUE not received.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n else:\n values_received = 1\n values_count = msg.param_count\n print(msg)\n\n for i in range(values_received, values_count):\n msg = the_connection_drone.recv_match(type='PARAM_VALUE', blocking=True, timeout=test_timeout)\n print(msg)\n values_received += 1\n sleep(0.02)\n\n if values_received != values_count:\n print('NOT OK! Received {} of {} values.'.format(values_received, values_count))\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n\n print('OK! 
All PARAM_VALUEs received.')\n\n # Setting module parameter.\n print('\\n\\n8 .##############################################################################')\n print(\"Setting module parameter\")\n for t in range(3):\n the_connection_drone.mav.param_set_send(1, 99, str.encode('packet count'), 1, mavutil.mavlink.MAV_PARAM_TYPE_INT32) # Test set a single parameter\n msg = the_connection_drone.recv_match(type='PARAM_VALUE', blocking=True, timeout=test_timeout)\n if msg is not None:\n break\n\n if msg is None:\n print('NOT OK! PARAM_VALUE not received.')\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n elif msg.param_id != 'packet count' or \\\n msg.param_value != 5.605193857299268e-45 or \\\n msg.param_type != mavutil.mavlink.MAV_PARAM_TYPE_INT32 or \\\n msg.param_count != param_count or \\\n msg.param_index != 13:\n print('NOT OK! PARAM_VALUE contains wrong values. Expected:\\n '\n 'PARAM_VALUE {param_id : packet count, param_value : 1.0, param_type : 6, param_count : ', param_count, ', param_index : 13}\\n'\n 'Received:\\n', msg)\n\n the_connection_drone.close()\n the_connection_gcs.close()\n exit()\n print('OK! Correct value set and PARAM_VALUE received.')\n", "repo_name": "trygve55/sheep-2021-emulator", "sub_path": "module_tests.py", "file_name": "module_tests.py", "file_ext": "py", "file_size_in_byte": 14305, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pymavlink.mavutil.mavlink", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 10, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 11, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 12, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 14, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 14, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 15, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 18, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 18, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 19, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 19, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 41, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 47, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 51, "usage_type": "attribute"}, {"api_name": "common.print_usage", "line_number": 52, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 54, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 55, "usage_type": "attribute"}, {"api_name": "common.init_mavlink", "line_number": 58, "usage_type": "call"}, {"api_name": "common.init_mavlink", "line_number": 62, "usage_type": "call"}, {"api_name": "threading.Thread", "line_number": 102, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 120, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 120, "usage_type": "name"}, {"api_name": "threading.Thread", "line_number": 144, "usage_type": "call"}, {"api_name": 
"pymavlink.mavutil.mavlink", "line_number": 226, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 226, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 254, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 254, "usage_type": "name"}, {"api_name": "time.sleep", "line_number": 290, "usage_type": "call"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 304, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 304, "usage_type": "name"}, {"api_name": "pymavlink.mavutil.mavlink", "line_number": 316, "usage_type": "attribute"}, {"api_name": "pymavlink.mavutil", "line_number": 316, "usage_type": "name"}]} +{"seq_id": "40506207496", "text": "import openpyxl\n\nworkbook = openpyxl.load_workbook('cwd_file.xlxs')\n\n# a sinle xlxs file might contain multiple tabs and \n# or maybe you can call that a sheet\n# that little tab thingy in the left-bottom of your \n# excel file??? yeah, that's what it is... that's a sheet\n\n# you can select a sheet by\n\nsheet1 = workbook.get_sheet_by_name('sheet_name_i.e_sheet1')\n\n# get names of all sheets, no paras\n\nsheets = workbook.get_sheet_names()\n# print (sheets) will result in sheet1\n\n# let's suppose we get a sheet named sheet1, let's work with it\ncellOrColumnName = sheet1['firstColumnName']\n# [] bracket in sheet gets call objects\n# cell objects have value member variable\n# with all the contents of that cell. \n\nstore = str(cellOrColumnName.value)\n# cell() method retusn a Cell object from a sheet\n\n\n# EDITING EXCEL SPREADSHEETS\nwb = openpyxl.workbook()\n# create a workbook object\nwb.get_sheet_names()\n# ['Sheet'] -> default name prolly. \n# lets select this sheet and work with it\n\nsheet = wb.get_sheet_by_name('Sheet')\n\nsheet['A1'].value == None\n# returns true\n\nsheet['A1'] = 42\nsheet['A2'] = 'Hello'\n\n# save the sheet to some other directory. 
\n\nimport os\n\nos.chdir('c:\\\\Users\\\\UserName\\\\Documents')\nwb.save('example.xlsx')\n\n# create another sheet\nsheet2 = wb.create_sheet()\nwb.get_sheet_names()\n\n# will return 2 sheet names now\n# ['Sheet', 'Sheet1']\n\nsheet2.title = 'New Sheet Of People Name'\nwb.get_sheet_names()\n\n# now it will return Sheet and New Sheet Of People Name\nwb.save('example.xlsx')\n# workbook saved\n\nwb.create_sheet(index=0, title='My Other Sheet')\nwb.save('Other Sheet.xlsx')\n# Now, this workbook's first sheet is named My Other Sheet\n# as you can see, the first parameter, index=0, represents the first sheet position\n\n\n\n\n\n\n\n", "repo_name": "mujeebishaque/AutomateTheBoringStuffWithPython", "sub_path": "workingWithExcel.py", "file_name": "workingWithExcel.py", "file_ext": "py", "file_size_in_byte": 1710, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "76", "api": [{"api_name": "openpyxl.load_workbook", "line_number": 3, "usage_type": "call"}, {"api_name": "openpyxl.Workbook", "line_number": 30, "usage_type": "call"}, {"api_name": "os.chdir", "line_number": 48, "usage_type": "call"}]} +{"seq_id": "25091197471", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('places', '0003_auto_20141203_1910'),\n    ]\n\n    operations = [\n        migrations.AlterField(\n            model_name='place',\n            name='recommandTime',
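\n            # NOTE: max_length is kept from the auto-generated migration; it has no effect on an IntegerField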
\n            field=models.IntegerField(max_length=40, verbose_name=b'\\xe6\\x8e\\xa8\\xe8\\x8d\\x90\\xe6\\x97\\xb6\\xe9\\x95\\xbf'),\n            preserve_default=True,\n        ),\n    ]\n", "repo_name": "curlylrt/testsite", "sub_path": "places/migrations/0004_auto_20141207_2007.py", "file_name": "0004_auto_20141207_2007.py", "file_ext": "py", "file_size_in_byte": 511, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 7, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 7, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterField", "line_number": 14, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 14, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "36803600176", "text": "# 2.6.6. Measuring objects properties: ndimage.measurements\n\nimport numpy as np\nfrom scipy import ndimage\nimport matplotlib.pyplot as plt\n\nnp.random.seed(1)\nn = 10\nl = 256\n\nim = np.zeros((l, l))\n\n# im holds an l*l (256*256) matrix of zeros; scatter n**2 random points across all of its fields.\npoints = l*np.random.random((2, n**2))\n\n# set the pixels at those points to 1\nim[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1\n\n# sigma does not only blur the image, it also shapes the outline of each blob: the formula below ties the kernel width to the grid, and other values produce different shapes.\n# e.g. change the value to sigma=5.2282277 and check, then change it to another value and check again.\nim = ndimage.gaussian_filter(im, sigma=l/(4.*n))\n\nmask = im > im.mean()\n\n# label the mask: all 0 pixels stay background, and each connected region of 1's becomes a numbered component\nlabel_im, nb_labels = ndimage.label(mask)\n\nplt.figure(figsize=(9,3))\n\nplt.subplot(131)\nplt.imshow(im)\nplt.axis('off')\nplt.subplot(132)\nplt.imshow(mask, cmap=plt.cm.gray)\nplt.axis('off')\nplt.subplot(133)\nplt.imshow(label_im, cmap=plt.cm.nipy_spectral)\nplt.axis('off')\n\nplt.subplots_adjust(wspace=0.02, hspace=0.02, top=1, bottom=0, left=0, right=1)\nplt.show()", "repo_name": "heysushil/python_image_processing", "sub_path": "17.Measuring_objects_properties_using_ndimage_messering.py", "file_name": "17.Measuring_objects_properties_using_ndimage_messering.py", "file_ext": "py", "file_size_in_byte": 1306, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "numpy.random.seed", "line_number": 7, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 7, "usage_type": "attribute"}, {"api_name": "numpy.zeros", "line_number": 11, "usage_type": "call"}, {"api_name": "numpy.random.random", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 14, "usage_type": "attribute"}, {"api_name": "numpy.int", "line_number": 17, "usage_type": "attribute"}, {"api_name": "scipy.ndimage.gaussian_filter", "line_number": 21, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 21, "usage_type": "name"}, {"api_name": "scipy.ndimage.label", "line_number": 26, "usage_type": "call"}, {"api_name": "scipy.ndimage", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 28, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 30, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 31, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 31, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 32, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 32, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 33, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 33, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 34, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 34, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 34, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 35, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplot", "line_number": 36, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 36, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 37, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 37, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.cm", "line_number": 37, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 38, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 38, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.subplots_adjust", "line_number": 40, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 40, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "27223807945", "text": "import json\n\nkk = {}\nwith open(\"assets/kk_full.json\", \"r\") as kkfile:\n kk = json.load(kkfile)\n\n\nfor song_no, song in kk.items():\n lyrics: list = song[\"Song\"].split(\"\\n\\n\")\n print(\"song \", song_no)\n if len(lyrics) == 1:\n ll = lyrics[0].split(\"\\n\")\n x = len(ll)\n n = 1\n for y in range(4, len(ll) // 2):\n if len(ll) % y == 0:\n n = y\n break\n lyrics = [\"\\n\".join(ll[i : i + n]) for i in range(0, len(ll), n)]\n kk[song_no][\"Song\"] = \"\\n\\n\".join(lyrics)\n if lyrics[0].count(\"\\n\") > lyrics[1].count(\"\\n\") and not lyrics[1].startswith(\"1\"):\n print(song_no)\n x = lyrics[1].count(\"\\n\") + 1\n chorus_lyrics = lyrics[0].split(\"\\n\")[x:]\n chorus = \"\\n\".join(chorus_lyrics)\n lyrics[0] = \"\\n\".join(lyrics[0].split(\"\\n\")[:x])\n lyrics.insert(0, chorus)\n kk[song_no][\"Song\"] = \"\\n\\n\".join(lyrics)\nwith open(\"updated_kk.json\", \"w\") as newkk:\n json.dump(kk, newkk, ensure_ascii=False)\n\nprint(\"NEW FILE\")\nwith open(\"updated_kk.json\", \"r\") as kkfile:\n kk = json.load(kkfile)\n\n\nfor song_no, song in kk.items():\n lyrics: list = song[\"Song\"].split(\"\\n\\n\")\n if lyrics[0].count(\"\\n\") > lyrics[1].count(\"\\n\"):\n print(song_no)\n", "repo_name": "gijocode/Church_PPT_Utility", "sub_path": "cleaner.py", "file_name": "cleaner.py", "file_ext": "py", "file_size_in_byte": 1261, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "json.load", "line_number": 5, 
"usage_type": "call"}, {"api_name": "json.dump", "line_number": 30, "usage_type": "call"}, {"api_name": "json.load", "line_number": 34, "usage_type": "call"}]} +{"seq_id": "31661148629", "text": "\nimport toml\n\n\nclass Extender:\n \"\"\"\n The base class for TidyPy configuration extenders.\n \"\"\"\n\n @classmethod\n def can_handle(cls, location):\n \"\"\"\n Indicates whether or not this Extender is capable of retrieving the\n specified location.\n\n :param location:\n a URI indicating where to retrieve the TidyPy configuration from\n :type location: str\n :rtype: bool\n \"\"\"\n\n raise NotImplementedError()\n\n @classmethod\n def retrieve(cls, location, project_path):\n \"\"\"\n Retrieves a TidyPy configuration from the specified location.\n\n :param location:\n a URI indicating where to retrieve the TidyPy configuration from\n :type location: str\n :param project_path: the full path to the project's base\n :type project_path: str\n :rtype: dict\n \"\"\"\n\n raise NotImplementedError()\n\n @classmethod\n def parse(cls, content, is_pyproject=False):\n \"\"\"\n A convenience method for parsing a TOML-serialized configuration.\n\n :param content: a TOML string containing a TidyPy configuration\n :type content: str\n :param is_pyproject:\n whether or not the content is (or resembles) a ``pyproject.toml``\n file, where the TidyPy configuration is located within a key named\n ``tool``.\n :type is_pyproject: bool\n :rtype: dict\n \"\"\"\n\n parsed = toml.loads(content)\n\n if is_pyproject:\n parsed = parsed.get('tool', {})\n parsed = parsed.get('tidypy', {})\n\n return parsed\n\n\nclass ExtenderError(Exception):\n \"\"\"\n The base class for all exceptions raised by an Extender during its\n operation.\n \"\"\"\n\n\nclass DoesNotExistError(ExtenderError):\n \"\"\"\n An exception indicating that the specified Extender does not exist in the\n current environment.\n \"\"\"\n\n", "repo_name": "jayclassless/tidypy", "sub_path": "src/tidypy/extenders/base.py", "file_name": "base.py", "file_ext": "py", "file_size_in_byte": 1915, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 33, "dataset": "github-code", "pt": "76", "api": [{"api_name": "toml.loads", "line_number": 54, "usage_type": "call"}]} +{"seq_id": "73594029365", "text": "#!/usr/bin/env python3\nimport sqlite3\n\ndef init(dbPath):\n print(\"[DB] Starting database\")\n global path\n path = dbPath\n prepare(\"gamedata.db\")\n c.execute(\"CREATE TABLE IF NOT EXISTS groups (userID INTEGER, posX INTEGER DEFAULT 0, posY INTEGER DEFAULT 0)\")\n end()\n\ndef prepare(dbName):\n global db\n global c\n db = sqlite3.connect(path + dbName)\n c = db.cursor()\n\ndef newGroup(username):\n print(\"[DB] Creating new group\")\n prepare(\"gamedata.db\")\n c.execute(\"INSERT INTO groups (userID) VALUES (\" + username + \")\")\n db.commit()\n db.close()\n\ndef end():\n try:\n db.commit()\n db.close()\n print(\"[DB] Closing the connection to the database was succesful\")\n return True\n except:\n print(\"[DB] Closing the connection to the database failed\")\n return False\n\nif __name__ == \"__main__\":\n print(\"This is a module you fool!\")\n", "repo_name": "Benjadahl/BenjaWorld", "sub_path": "dbHandler.py", "file_name": "dbHandler.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "9390116702", "text": "\"\"\"Server for creating Flask app and 
\n\ndef end():\n    try:\n        db.commit()\n        db.close()\n        print(\"[DB] Closing the connection to the database was successful\")\n        return True\n    except sqlite3.Error:\n        print(\"[DB] Closing the connection to the database failed\")\n        return False\n\nif __name__ == \"__main__\":\n    print(\"This is a module you fool!\")\n", "repo_name": "Benjadahl/BenjaWorld", "sub_path": "dbHandler.py", "file_name": "dbHandler.py", "file_ext": "py", "file_size_in_byte": 904, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sqlite3.connect", "line_number": 15, "usage_type": "call"}]} +{"seq_id": "9390116702", "text": "\"\"\"Server for creating Flask app and handling routes\"\"\"\n\nfrom flask import Flask, render_template\nfrom model import connect_to_db, db\n\napp = Flask(__name__)\n\n\n@app.route('/')\ndef display_homepage():\n    \"\"\"Show homepage\"\"\"\n    return render_template('index.html')\n\n\nif __name__ == \"__main__\":\n\n    connect_to_db(app)\n    app.run(debug=True)\n", "repo_name": "Nmargolis/landlord-data-sf", "sub_path": "server.py", "file_name": "server.py", "file_ext": "py", "file_size_in_byte": 341, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "model.connect_to_db", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "33853606353", "text": "#!/usr/bin/python\r\n# -*- coding: utf-8 -*-\r\n\r\n\r\n# import matplotlib.pylab as plt\r\nimport datetime as dt\r\nimport config\r\n\r\n\r\n# this function updates the files with the user's statistics\r\n\r\ndef check_date():\r\n    def update(cur_day: dt.date, last_day: dt.date, w_file: []):\r\n        dif = (cur_day - last_day).days\r\n        if dif > config.positions:\r\n            w_file.clear()\r\n            for d in range(config.positions):\r\n                new_list = [f'{(cur_day - dt.timedelta(days=d)).isoformat()} 0 0 0 0']\r\n                w_file.extend(new_list)\r\n        else:\r\n            while dif > 0:\r\n                dif -= 1\r\n                new_list = [f'{(cur_day - dt.timedelta(days=dif)).isoformat()} 0 0 0 0']\r\n                w_file = new_list + w_file\r\n            w_file = w_file[:config.positions]\r\n        return w_file\r\n\r\n    def create_new(cur_day: dt.date, w_file: []):\r\n        w_file.clear()\r\n        for d in range(config.positions):\r\n            new_list = [f'{(cur_day - dt.timedelta(days=d)).isoformat()} 0 0 0 0']\r\n            w_file.extend(new_list)\r\n        return w_file\r\n\r\n    with open(f'statistics\\\\{config.nick}.txt', 'r', encoding='utf-8') as f:\r\n        user_stats = f.read().split('\\n')\r\n\r\n    today = dt.date.today()\r\n    if user_stats[0] == '':\r\n        user_stats = create_new(today, user_stats)\r\n    else:\r\n        temp_var = user_stats[0].split(' ')[0].split('-')\r\n        previous = dt.date(int(temp_var[0]), int(temp_var[1]), int(temp_var[2]))\r\n        if previous > today:\r\n            # the last login date is chronologically later than today's date (according to the system)\r\n            print('error occurred due to time travel - check your system time')\r\n        elif previous < today:\r\n            user_stats = update(today, previous, user_stats)\r\n\r\n    new_part = ''\r\n    for i in user_stats:\r\n        new_part += f'{i}\\n'\r\n\r\n    with open(f'statistics\\\\{config.nick}.txt', 'w', encoding='utf-8') as f:\r\n        f.write(new_part[:-1])\r\n    return\r\n\r\n\r\n# this function displays a plot of the points from the last 7 days\r\n\r\n# def show_plot():\r\n#     with open(f'statistics\\\\{config.nick}.txt', 'r', encoding='utf-8') as f:\r\n#         src = f.read().split('\\n')\r\n#     dates = []\r\n#     pol_cor = []\r\n#     eng_cor = []\r\n#     for e in src[:7]:\r\n#         i = e.split(' ')\r\n#         temp_var = i[0].split('-')\r\n#         temp_date = dt.date(int(temp_var[0]), int(temp_var[1]), int(temp_var[2]))\r\n#         i[0] = temp_date.strftime('%b %d.')\r\n#         dates += [i[0]]\r\n#         pol_cor += [int(i[3])]\r\n#         eng_cor += [int(i[4])]\r\n#\r\n#     plt.plot(dates, pol_cor, 'bo-', dates, eng_cor, 'ro-')\r\n#     plt.ylim(0)\r\n#     plt.grid(True, alpha=0.2)\r\n#     plt.title('wykaz uzyskanych punktów za gry:')\r\n#     plt.legend(['w wersji polskiej', 'w wersji angielskiej'], loc='upper left')\r\n#     plt.show()\r\n#\r\n#     return\r\n\r\n\r\n# this function returns a string with formatted text\r\n# presenting the statistics - just print it\r\n\r\ndef show_stats_full():\r\n    with open(f'statistics\\\\{config.nick}.txt', 'r', encoding='utf-8') as f:\r\n        user_stats = f.read().split('\\n')\r\n    ret_file = 'data'.center(18, ' ') + 'liczba'.center(16, ' ') + 'liczba'.center(28, ' ') +\\\r\n               'liczba odgadniętych słów'.center(28, ' ') + '\\n' +\\\r\n               'rozgrywek'.center(13, ' ') + 'rozgrywek'.center(11, ' ') + 'wyświetlony słów'.center(20, ' ') +\\\r\n               'po polsku'.center(13, ' ') + '|' + 'po angielsku'.center(14, ' ') + '\\n\\n'\r\n    for i in user_stats:\r\n        temp_var = i.split(' ')\r\n        ret_file += f'{temp_var[0].center(11)}{temp_var[1].center(22)}' \\\r\n                    f'{temp_var[2].center(28)}{temp_var[3].center(18)} {temp_var[4].center(21)}\\n'\r\n    # print(ret_file)\r\n    return ret_file\r\n\r\n\r\n# updates the statistics after every game\r\n# 1st argument - number of sessions (default 1)\r\n# 2nd argument - number of displayed pairs to guess\r\n# 3rd argument - number of correctly guessed pairs during a single game in the Polish version\r\n# 4th argument - number of correctly guessed pairs during a single game in the English version
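\r\n#\r\n# e.g. one round (hypothetical numbers) in which 10 pairs were shown and 7 guessed in the Polish version:\r\n#   after_session(shown_words=10, pol_correct=7)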
statystyczne - wystarczy go wyświetlić\r\n\r\ndef show_stats_full():\r\n with open(f'statistics\\\\{config.nick}.txt', 'r', encoding='utf-8') as f:\r\n user_stats = f.read().split('\\n')\r\n ret_file = 'data'.center(18, ' ') + 'liczba'.center(16, ' ') + 'liczba'.center(28, ' ') +\\\r\n 'liczba odgadniętych słów'.center(28, ' ') + '\\n' +\\\r\n 'rozgrywek'.center(13, ' ') + 'rozgrywek'.center(11, ' ') + 'wyświetlony słów'.center(20, ' ') +\\\r\n 'po polsku'.center(13, ' ') + '|' + 'po angielsku'.center(14, ' ') + '\\n\\n'\r\n for i in user_stats:\r\n temp_var = i.split(' ')\r\n ret_file += f'{temp_var[0].center(11)}{temp_var[1].center(22)}' \\\r\n f'{temp_var[2].center(28)}{temp_var[3].center(18)} {temp_var[4].center(21)}\\n'\r\n # print(ret_file)\r\n return ret_file\r\n\r\n\r\n# funkcja aktualizująca statystyki po każdej grze\r\n# 1. argument - liczba sesji (domyślnie 1)\r\n# 2. argument - liczba wyświetlonych par do odgadnięcia\r\n# 3. argument - liczba poprawnie odgadniętych par w trakcie jednej gry w wersji polskiej\r\n# 4. argument - liczba poprawnie odgadniętych par w trakcie jednej gry w wersji angielskiej\r\n\r\ndef after_session(sessions: int = 1, shown_words: int = 0, pol_correct: int = 0, eng_correct: int = 0):\r\n with open(f'statistics\\\\{config.nick}.txt', 'r', encoding='utf-8') as f:\r\n user_stats = f.read().split('\\n', 1)\r\n\r\n temp_var = user_stats[0].split(' ')\r\n temp_var[1] = f'{sessions + int(temp_var[1])}'\r\n temp_var[2] = f'{shown_words + int(temp_var[2])}'\r\n temp_var[3] = f'{pol_correct + int(temp_var[3])}'\r\n temp_var[4] = f'{eng_correct + int(temp_var[4])}'\r\n user_stats[0] = f'{temp_var[0]} {temp_var[1]} {temp_var[2]} {temp_var[3]} {temp_var[4]}\\n'\r\n\r\n new_part = f'{user_stats[0]}{user_stats[1]}'\r\n with open(f'statistics\\\\{config.nick}.txt', 'w', encoding='utf-8') as f:\r\n f.write(new_part)\r\n return\r\n\r\n\r\n# if __name__ == \"__main__\":\r\n# pass\r\n", "repo_name": "Jakub-Jalt/NPG_Fiszki", "sub_path": "mode_stat.py", "file_name": "mode_stat.py", "file_ext": "py", "file_size_in_byte": 4945, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "77", "api": [{"api_name": "datetime.date", "line_number": 13, "usage_type": "attribute"}, {"api_name": "config.positions", "line_number": 15, "usage_type": "attribute"}, {"api_name": "config.positions", "line_number": 17, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.timedelta", "line_number": 23, "usage_type": "call"}, {"api_name": "config.positions", "line_number": 25, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 28, "usage_type": "attribute"}, {"api_name": "config.positions", "line_number": 30, "usage_type": "attribute"}, {"api_name": "datetime.timedelta", "line_number": 31, "usage_type": "call"}, {"api_name": "config.nick", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.date.today", "line_number": 38, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 38, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 43, "usage_type": "call"}, {"api_name": "config.nick", "line_number": 54, "usage_type": "attribute"}, {"api_name": "config.nick", "line_number": 90, "usage_type": "attribute"}, {"api_name": "config.nick", "line_number": 111, "usage_type": "attribute"}, {"api_name": "config.nick", "line_number": 122, "usage_type": "attribute"}]} +{"seq_id": "21078244851", "text": "import 
json\nimport random\nimport os\n\n\ndef read_jsonl(file_path, sample_rate=1.0):\n assert os.path.exists(file_path)\n with open(file_path) as r:\n for line in r:\n if random.random() <= sample_rate:\n yield json.loads(line)\n\n\ndef write_jsonl(records, file_path):\n with open(file_path, \"w\") as w:\n for r in records:\n w.write(json.dumps(r, ensure_ascii=False).strip() + \"\\n\")\n\n\ndef read_lines(file_path):\n lines = list()\n with open(file_path, \"r\") as r:\n for line in r:\n lines.append(line.strip())\n return lines\n", "repo_name": "IlyaGusev/rudetox", "sub_path": "rudetox/util/io.py", "file_name": "io.py", "file_ext": "py", "file_size_in_byte": 593, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.exists", "line_number": 7, "usage_type": "call"}, {"api_name": "os.path", "line_number": 7, "usage_type": "attribute"}, {"api_name": "random.random", "line_number": 10, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 11, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 17, "usage_type": "call"}]} +{"seq_id": "24583147926", "text": "import cv2, torch\nfrom model.dataset import Vocabulary\nfrom torchvision import transforms\nimport model.models as models\n\ntransform = transforms.Compose(\n [transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]\n)\nvocab = Vocabulary(\"./model/meta/data_props.pkl\")\nhidden_size = 256\nmodel = models.Transcriptor(hidden_size, len(vocab)).to(models.device)\nmodel.load_state_dict(torch.load(\"./model/epoch_50.pt\"))\nmodel.eval()\n\n\ndef predict(img_path, length=50):\n\timg = 255 - cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)\n\timg = transform(img).to(models.device).unsqueeze(0)\n\touts = model(img, length=length)\n\tpred = vocab.decode(outs.argmax(dim=1)[0].cpu().numpy())\n\treturn pred.replace(r\" \\eos\", \"\")\n", "repo_name": "calvinyusno2359/50039-im2latex", "sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 702, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "torchvision.transforms.Compose", "line_number": 6, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 6, "usage_type": "name"}, {"api_name": "torchvision.transforms.ToTensor", "line_number": 7, "usage_type": "call"}, {"api_name": "torchvision.transforms", "line_number": 7, "usage_type": "name"}, {"api_name": "torchvision.transforms.Normalize", "line_number": 7, "usage_type": "call"}, {"api_name": "model.dataset.Vocabulary", "line_number": 9, "usage_type": "call"}, {"api_name": "model.dataset", "line_number": 11, "usage_type": "name"}, {"api_name": "model.models.Transcriptor", "line_number": 11, "usage_type": "call"}, {"api_name": "model.models", "line_number": 11, "usage_type": "name"}, {"api_name": "model.models.device", "line_number": 11, "usage_type": "attribute"}, {"api_name": "model.dataset.load_state_dict", "line_number": 12, "usage_type": "call"}, {"api_name": "model.dataset", "line_number": 12, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 12, "usage_type": "call"}, {"api_name": "model.dataset.eval", "line_number": 13, "usage_type": "call"}, {"api_name": "model.dataset", "line_number": 13, "usage_type": "name"}, {"api_name": "cv2.imread", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.IMREAD_GRAYSCALE", "line_number": 17, "usage_type": "attribute"}, {"api_name": 
"model.models.device", "line_number": 18, "usage_type": "attribute"}, {"api_name": "model.models", "line_number": 18, "usage_type": "name"}, {"api_name": "model.dataset", "line_number": 19, "usage_type": "call"}]} +{"seq_id": "9839462511", "text": "import logging\n\nimport numpy as np\n\nfrom easyfl.simulation.mobile_ratio import MOBILE_RATIO\nfrom easyfl.datasets.simulation import equal_division\n\nlogger = logging.getLogger(__name__)\n\nSIMULATE_ISO = \"iso\" # isometric sleep time distribution among selected clients\nSIMULATE_DIR = \"dir\" # use symmetric dirichlet process to sample sleep time heterogenous\nSIMULATE_REAL = \"real\" # use real speed ratio of main stream smartphones to simulate sleep time heterogenous\n\n\ndef assign_value_to_group(groups, values):\n assert len(groups) == len(values)\n result = []\n for i in range(len(groups)):\n result.extend([values[i]] * len(groups[i]))\n return result\n\n\ndef sample_real_ratio(num_values):\n value_pool = list(MOBILE_RATIO.values())\n idxs = np.random.randint(0, len(value_pool), size=num_values)\n return np.array([value_pool[i] for i in idxs]).astype(float)\n\n\ndef resource_hetero_simulation(fraction, hetero_type, sleep_group_num, level, total_time, num_clients):\n \"\"\"Simulated resource heterogeneous by add sleeping time to clients.\n\n Args:\n fraction (float): The fraction of clients attending heterogeneous simulation.\n hetero_type (str): The type of heterogeneous simulation, options: iso, dir or real.\n sleep_group_num (int): The number of groups with different sleep time.\n level (int): The level of heterogeneous (0-5), 0 means no heterogeneous among clients.\n total_time (float): The total sleep time of all clients.\n num_clients (int): The total number of clients.\n\n Returns:\n list[float]: A list of sleep time with distribution according to heterogeneous type.\n \"\"\"\n sleep_clients = int(fraction * num_clients)\n unsleep_clients = [0] * (num_clients - sleep_clients)\n sleep_group_num = sleep_group_num\n if sleep_group_num > sleep_clients:\n logger.warning(\"sleep_group_num {} is more than sleep_clients number {}, \\\n so we set sleep_group_num to sleep_clients\".format(sleep_group_num, sleep_clients))\n sleep_group_num = sleep_clients\n groups, _ = equal_division(sleep_group_num, np.arange(sleep_clients))\n if level == 0:\n distribution = np.array([1] * sleep_clients)\n elif hetero_type == SIMULATE_DIR:\n alpha = 1 / (level * level)\n values = np.random.dirichlet(np.repeat(alpha, sleep_group_num))\n distribution = assign_value_to_group(groups, values)\n elif hetero_type == SIMULATE_ISO:\n if level > 5:\n raise ValueError(\"level cannot be more than 5\")\n begin = 0.5 - level * 0.1\n end = 0.5 + level * 0.1\n values = np.arange(begin, end, (end - begin) / sleep_group_num)\n distribution = assign_value_to_group(groups, values)\n elif hetero_type == SIMULATE_REAL:\n values = sample_real_ratio(sleep_group_num)\n distribution = assign_value_to_group(groups, values)\n else:\n raise ValueError(\"sleep type not supported, please use either dir or iso\")\n distribution += unsleep_clients\n np.random.shuffle(distribution)\n return distribution / sum(distribution) * total_time\n", "repo_name": "EasyFL-AI/EasyFL", "sub_path": "easyfl/simulation/system_hetero.py", "file_name": "system_hetero.py", "file_ext": "py", "file_size_in_byte": 3104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 256, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 8, "usage_type": "call"}, 
{"api_name": "easyfl.simulation.mobile_ratio.MOBILE_RATIO.values", "line_number": 24, "usage_type": "call"}, {"api_name": "easyfl.simulation.mobile_ratio.MOBILE_RATIO", "line_number": 24, "usage_type": "name"}, {"api_name": "numpy.random.randint", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.array", "line_number": 26, "usage_type": "call"}, {"api_name": "easyfl.datasets.simulation.equal_division", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 50, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 52, "usage_type": "call"}, {"api_name": "numpy.random.dirichlet", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 55, "usage_type": "attribute"}, {"api_name": "numpy.repeat", "line_number": 55, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 62, "usage_type": "call"}, {"api_name": "numpy.random.shuffle", "line_number": 70, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 70, "usage_type": "attribute"}]} +{"seq_id": "37883613658", "text": "import os\nimport tweepy\nimport requests\nimport base64\nfrom dotenv import load_dotenv\n\nload_dotenv()\nconsumer_key = os.getenv(\"CONSUMER_KEY\")\nconsumer_secret = os.getenv(\"CONSUMER_SECRET\")\naccess_token = os.getenv(\"ACCESS_TOKEN\")\naccess_token_secret = os.getenv(\"ACCESS_TOKEN_SECRET\")\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\napi = tweepy.API(auth)\n\n\n#Reformat the keys and encode them\nkey_secret = '{}:{}'.format(consumer_key, consumer_secret).encode('ascii')\n#Transform from bytes to bytes that can be printed\nb64_encoded_key = base64.b64encode(key_secret)\n#Transform from bytes back into Unicode\nb64_encoded_key = b64_encoded_key.decode('ascii')\n\nbase_url = 'https://api.twitter.com/'\nauth_url = '{}oauth2/token'.format(base_url)\nauth_headers = {\n 'Authorization': 'Basic {}'.format(b64_encoded_key),\n 'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8'\n}\nauth_data = {\n 'grant_type': 'client_credentials'\n}\nauth_resp = requests.post(auth_url, headers=auth_headers, data=auth_data)\nprint(auth_resp.status_code)\naccess_token = auth_resp.json()['access_token']\n#print(access_token)\n\nfile = open('C:/Temp/th.jpg', 'rb')\ndata = file.read()\nresource_url = 'https://upload.twitter.com/1.1/media/upload.json'\nupload_image = {\n 'media':data,\n 'media_category':'tweet_image'}\n \nimage_headers = {\n 'Authorization': 'Bearer {}'.format(access_token) \n}\n\nmedia_id = requests.post(resource_url, headers=image_headers, params=upload_image)\nprint(media_id.status_code)\n\n\"\"\"\ntweet_meta={ \"media_id\": media_id,\n \"alt_text\": {\n \"text\":\"tlogo\" \n }}\nmetadata_url = 'https://upload.twitter.com/1.1/media/metadata/create.json' \nmetadata_resp = requests.post(metadata_url, params=tweet_meta, headers=auth_data)\nprint(metadata_resp.status_code)\n\n#tweet = {'status': 'just test #tweepy', 'media_ids': media_id}\ntweet = {'status': 'just test #tweepy'}\npost_url = 'https://api.twitter.com/1.1/statuses/update.json' \npost_resp = requests.post(post_url, params=tweet, headers=image_headers)\nprint(post_resp.status_code)\n\"\"\"\n\n#try:\n# api.verify_credentials()\n# print(\"Authentication OK\")\n# api.update_status(\"just test.. 
#tweepy\")\n#except:\n# print(\"Error during authentication\")\n", "repo_name": "tomfutago/gangstabet-feed", "sub_path": "tests/tweepy_media_upload_test.py", "file_name": "tweepy_media_upload_test.py", "file_ext": "py", "file_size_in_byte": 2312, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 7, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 9, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 10, "usage_type": "call"}, {"api_name": "os.getenv", "line_number": 11, "usage_type": "call"}, {"api_name": "tweepy.OAuthHandler", "line_number": 14, "usage_type": "call"}, {"api_name": "tweepy.API", "line_number": 16, "usage_type": "call"}, {"api_name": "base64.b64encode", "line_number": 22, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 35, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 51, "usage_type": "call"}]} +{"seq_id": "20973248771", "text": "import json\nimport torch\nimport logging\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch.nn.functional as F\n\nfrom PIL import Image\nfrom torch import nn\nfrom torchvision import models\nfrom argparse import ArgumentParser\n\nlogging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M')\n\nlogger = logging.getLogger(__file__)\n\nparser = ArgumentParser(description='Process solution arguments.')\nparser.add_argument('--device', type=str, default='cpu', help='Device used for training (cuda or cpu)')\nparser.add_argument('--image-path', type=str, help='Path to image')\nparser.add_argument('--class-file-path', type=str, help='Path to file mapping classes to names',\n default='cat_to_name.json')\nparser.add_argument('--model-path', type=str, help='Path to model checkpoint')\nparser.add_argument('--k', type=int, help='Number of elements to be used by top-k', default=1)\n\n\ndef get_cat_to_name(path='cat_to_name.json'):\n cat_to_name = None\n try:\n path_list = path.split('.')\n if len(path_list) == 0:\n raise ValueError('Invalid path')\n elif len(path_list) > 0 and path_list[-1] != 'json':\n raise ValueError('Invalid file format. 
Json type should be selected')\n else:\n with open(path, 'r') as f:\n cat_to_name = json.load(f)\n except FileNotFoundError:\n return cat_to_name\n\n return cat_to_name\n\n\nclass DeepFeedForwardNet(nn.Module):\n def __init__(self, input_shape, layers=2, units=128, dropout=0.5):\n super(DeepFeedForwardNet, self).__init__()\n self.input_shape = input_shape\n self.input = nn.Linear(input_shape, units)\n self.out = nn.Linear(units, 102)\n if dropout is not None:\n self.dropout = nn.Dropout(dropout)\n else:\n self.dropout = dropout\n\n self.layers = list()\n for i in range(layers):\n self.layers.append(nn.Linear(units, units))\n\n self.layers = nn.ModuleList(self.layers)\n\n def forward(self, x):\n if self.dropout is not None:\n y = F.relu(self.dropout(self.input(x)))\n\n for layer in self.layers:\n y = F.relu(self.dropout(layer(y)))\n else:\n y = F.relu(self.input(x))\n\n for layer in self.layers:\n y = F.relu(layer(y))\n\n out = self.out(y)\n\n return out\n\n\ndef load_model_checkpoint_only(model_path_, device_='cpu'):\n logger.info('Loading checkpoint located at: {}'.format(model_path_))\n\n parameters = model_path_.split('/')\n\n if len(parameters) == 0:\n raise ValueError('Wrong model dir format')\n\n parameters = parameters[-1].split('-')\n\n name = parameters[0] # Pretrained model name.\n layers = int(parameters[1].replace('dnn', '')) # Number of layers in the checkpoint name.\n hidden_units = int(parameters[2].split('_')[0])\n\n # checkpoint = torch.load(model_dir_)\n checkpoint = torch.load(model_path_, map_location=lambda storage, loc: storage)\n model_checkpoint = checkpoint['model']\n net = models.__dict__[name](pretrained=True)\n\n if 'vgg' in name:\n input_features = 25088 # VGG input\n elif 'resnet' in name:\n input_features = 512 # Resnet input\n else:\n input_features = 9216 # Alexnet input\n\n dff_net = DeepFeedForwardNet(input_features, layers, hidden_units)\n # dff_net = dff_net.to(device_)\n\n for p in net.parameters():\n p.requires_grad = False\n\n if 'resnet' in name:\n net.fc = dff_net\n else:\n net.classifier = dff_net\n\n net = net.to(device_)\n net.load_state_dict(model_checkpoint)\n net.class_to_index = checkpoint['classes']\n\n logger.info('Model loaded')\n\n return net\n\n\ndef resize_and_keep_ar(pil_image, smaller_side):\n w, h = pil_image.size\n\n if w < h:\n new_w = smaller_side\n w_ratio = (new_w / float(pil_image.size[0]))\n new_h = int((float(pil_image.size[1]) * float(w_ratio)))\n else:\n new_h = smaller_side\n h_ratio = (new_h / float(pil_image.size[1]))\n new_w = int((float(pil_image.size[0]) * float(h_ratio)))\n\n return pil_image.resize((new_w, new_h))\n\n\ndef center_square_crop(pil_image, size):\n width, height = pil_image.size\n\n left = (width - size) / 2\n top = (height - size) / 2\n right = (width + size) / 2\n bottom = (height + size) / 2\n\n return pil_image.crop((left, top, right, bottom))\n\n\ndef normalize_image(pil_image):\n image_as_array = np.array(pil_image)\n img = torch.from_numpy(image_as_array.transpose((2, 0, 1)))\n\n if isinstance(img, torch.ByteTensor):\n img = img.float().div(255)\n\n for t, m, s in zip(img, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]):\n t.sub_(m).div_(s)\n\n return img.numpy()\n\n\ndef process_image(image):\n ''' Scales, crops, and normalizes a PIL image for a PyTorch model,\n returns an Numpy array\n '''\n _CROP_DIM = 224\n _RESIZE_SIZE = 256\n\n im = Image.fromarray(np.array(image))\n\n resized = resize_and_keep_ar(im, _RESIZE_SIZE)\n cropped = center_square_crop(resized, _CROP_DIM)\n normalized = 
normalize_image(cropped)\n\n return normalized\n\n\ndef imshow(image, ax=None, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n if ax is None:\n fig, ax = plt.subplots()\n\n # PyTorch tensors assume the color channel is the first dimension\n # but matplotlib assumes is the third dimension\n image = image.numpy().transpose((1, 2, 0))\n\n # Undo preprocessing\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.229, 0.224, 0.225])\n image = std * image + mean\n\n # Image needs to be clipped between 0 and 1 or it looks like noise when displayed\n image = np.clip(image, 0, 1)\n\n if title is not None:\n ax.set_title(title)\n else:\n ax.set_title(\"Flower Species\")\n\n ax.imshow(image)\n\n return ax\n\n\ndef predict(image_path_, model_, device_='cpu', topk=5, class_mapping_file=None):\n ''' Predict the class (or classes) of an image using a trained deep learning model.\n '''\n\n logger.info(\"Starting prediction mode with top-{}\".format(topk))\n\n image_as_tensor = torch.Tensor(process_image(Image.open(image_path_))).reshape([1, 3, 224, 224])\n reverse = {k: v for v, k in model_.class_to_index.items()}\n\n # Switch the model to eval mode\n model_.eval()\n model_ = model_.to(device_)\n image_as_tensor = image_as_tensor.to(device_) # Switching input to same device as model.\n\n class_labels = get_cat_to_name(class_mapping_file if class_mapping_file is not None else \"cat_to_name.json\")\n\n # Predict input\n predicted_ = F.softmax(model_(image_as_tensor), dim=1)\n preds = predicted_.topk(topk)\n\n probs = [float(prob) for prob in preds[0][0]]\n classes = [class_labels[str(reverse[int(cls)])] for cls in preds[1][0]]\n\n return probs, classes\n\n\ndef plot_charts(tensor_image, true_label, pred_classes, pred_probas):\n ''' Auxiliar function used to create and plot charts given an image and some required information.\n '''\n\n fig, (ax1, ax2) = plt.subplots(2, figsize=(5, 10))\n\n cat_to_name = get_cat_to_name()\n\n y_pos = np.arange(len(pred_classes))\n performance = np.asarray(pred_probas)\n classes = (cat_to_name[class_] for class_ in pred_classes)\n\n ax2.barh(y_pos, performance, align='center', color='blue', ecolor='black')\n ax2.set_yticks(y_pos)\n ax2.set_yticklabels(classes)\n ax2.invert_yaxis()\n ax2.set_xlabel('Probability')\n ax2.set_title('Predicted classes')\n\n fig.subplots_adjust(hspace=0.3)\n\n imshow(tensor_image, ax=ax1, title=cat_to_name[true_label])\n\n plt.show()\n\n\ndef sanity_check(image_path_, model_, topk=5):\n prb_, cls_ = predict(image_path_, model_, topk=topk)\n\n tensor_image = torch.Tensor(process_image(Image.open(image_path_)))\n true_label = image_path_.split('/')[2]\n\n plot_charts(tensor_image, true_label, cls_, prb_)\n\n\nif __name__ == '__main__':\n\n args = parser.parse_args()\n\n image_path = args.image_path\n model_path = args.model_path\n class_file = args.class_file_path\n k = args.k\n\n if args.device == 'cuda':\n if not torch.cuda.is_available():\n device = 'cpu'\n logger.warning('Cuda is not available on this machine, setting device to cpu')\n else:\n device = args.device\n else:\n device = args.device\n\n logger.info('Device mode set to {}'.format(device))\n\n model = load_model_checkpoint_only(model_path, device)\n probs, classes = predict(image_path, model, device_=device, topk=1, class_mapping_file=class_file)\n logger.info(list(zip(probs, classes)))\n\n top_k_probs, top_k_classes = predict(image_path, model, device_=device, topk=k, class_mapping_file=class_file)\n logger.info(list(zip(top_k_probs, top_k_classes)))\n", "repo_name": "pcastanha/image", 
"sub_path": "predict.py", "file_name": "predict.py", "file_ext": "py", "file_size_in_byte": 8875, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.basicConfig", "line_number": 14, "usage_type": "call"}, {"api_name": "logging.DEBUG", "line_number": 14, "usage_type": "attribute"}, {"api_name": "logging.getLogger", "line_number": 18, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 20, "usage_type": "call"}, {"api_name": "json.load", "line_number": 39, "usage_type": "call"}, {"api_name": "torch.nn.Module", "line_number": 46, "usage_type": "attribute"}, {"api_name": "torch.nn", "line_number": 46, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 50, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 50, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 51, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 51, "usage_type": "name"}, {"api_name": "torch.nn.Dropout", "line_number": 53, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 53, "usage_type": "name"}, {"api_name": "torch.nn.Linear", "line_number": 59, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 59, "usage_type": "name"}, {"api_name": "torch.nn.ModuleList", "line_number": 61, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 61, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 65, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 65, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 68, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 68, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 70, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 70, "usage_type": "name"}, {"api_name": "torch.nn.functional.relu", "line_number": 73, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 73, "usage_type": "name"}, {"api_name": "torch.load", "line_number": 95, "usage_type": "call"}, {"api_name": "torchvision.models.__dict__", "line_number": 97, "usage_type": "attribute"}, {"api_name": "torchvision.models", "line_number": 97, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 153, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 154, "usage_type": "call"}, {"api_name": "torch.ByteTensor", "line_number": 156, "usage_type": "attribute"}, {"api_name": "PIL.Image.fromarray", "line_number": 172, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 172, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 172, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplots", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 184, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 192, "usage_type": "call"}, {"api_name": "numpy.clip", "line_number": 196, "usage_type": "call"}, {"api_name": "torch.Tensor", "line_number": 214, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 214, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 214, "usage_type": "name"}, {"api_name": "torch.nn.functional.softmax", "line_number": 225, "usage_type": "call"}, {"api_name": "torch.nn.functional", "line_number": 225, "usage_type": "name"}, {"api_name": 
"matplotlib.pyplot.subplots", "line_number": 238, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 238, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.asarray", "line_number": 243, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.show", "line_number": 257, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 257, "usage_type": "name"}, {"api_name": "torch.Tensor", "line_number": 263, "usage_type": "call"}, {"api_name": "PIL.Image.open", "line_number": 263, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 263, "usage_type": "name"}, {"api_name": "torch.cuda.is_available", "line_number": 279, "usage_type": "call"}, {"api_name": "torch.cuda", "line_number": 279, "usage_type": "attribute"}]} +{"seq_id": "39080496738", "text": "from flask import Flask, request, send_from_directory\nfrom flask_restful import Api, Resource\nimport json\nimport os\nfrom flask_cors import CORS\nfrom models import db, Article\n\nbasedir = os.path.dirname(os.path.abspath(__file__))\nSQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')\napp = Flask(__name__)\napp.config.update({\n 'SQLALCHEMY_TRACK_MODIFICATIONS': True,\n \"SQLALCHEMY_DATABASE_URI\": SQLALCHEMY_DATABASE_URI,\n})\napi = Api(app)\ncors = CORS(app)\ndb.init_app(app)\nIMAGE_PATH = os.path.join(basedir, 'image')\n\ndef serializer(l):\n ret = []\n for row in l:\n ret.append(json.loads(row.serialize()))\n return ret \n\nclass Picture(Resource):\n def get(self, name):\n return send_from_directory(IMAGE_PATH, name)\n\nclass ArticleList(Resource):\n def get_articles(self):\n articles = Article.query.all()\n return articles \n\n def get(self):\n articles = self.get_articles()\n return serializer(articles)\n\n def post(self):\n r_json = request.get_json() \n title = r_json['title']\n image = r_json['image']\n category = r_json['category']\n content = r_json['content']\n new_article = Article(title, image, category, content)\n db.session.add(new_article)\n db.session.commit()\n return \"write successfully\"\n\n def put(self):\n r_json = request.get_json()\n _id = r_json['id']\n title = r_json['title']\n image = r_json['image']\n category = r_json['category']\n content = r_json['content']\n article = Article.query.filter_by(id=_id).first()\n if not article:\n return \"article[:{}] is not exists\".format(_id)\n article.title = title\n article.image = image\n article.content = content\n db.session.commit()\n return \"update successfully\"\n\n def delete(self):\n r_json = request.get_json()\n _id = r_json['id']\n article = Article.query.filter_by(id=_id).first()\n if not article:\n return \"article[:{}] is not exists\".format(_id)\n db.session.delete(article)\n db.session.commit()\n return \"delete successfully\"\n\napi.add_resource(ArticleList, '/api/articles')\napi.add_resource(Picture, '/api/pictures/')\n\nif __name__ == '__main__':\n with app.app_context():\n db.create_all()\n app.run(host='0.0.0.0', port=5000, debug=True)\n\n", "repo_name": "sisobus/WebStudio2019", "sub_path": "projects/20151172/api/boilerplate.py", "file_name": "boilerplate.py", "file_ext": "py", "file_size_in_byte": 2416, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 14, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.dirname", "line_number": 8, "usage_type": "call"}, {"api_name": "os.path", "line_number": 8, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 8, "usage_type": 
"call"}, {"api_name": "os.path.join", "line_number": 9, "usage_type": "call"}, {"api_name": "os.path", "line_number": 9, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 10, "usage_type": "call"}, {"api_name": "flask_restful.Api", "line_number": 15, "usage_type": "call"}, {"api_name": "flask_cors.CORS", "line_number": 16, "usage_type": "call"}, {"api_name": "models.db.init_app", "line_number": 17, "usage_type": "call"}, {"api_name": "models.db", "line_number": 17, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path", "line_number": 18, "usage_type": "attribute"}, {"api_name": "json.loads", "line_number": 23, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 26, "usage_type": "name"}, {"api_name": "flask.send_from_directory", "line_number": 28, "usage_type": "call"}, {"api_name": "flask_restful.Resource", "line_number": 30, "usage_type": "name"}, {"api_name": "models.Article.query.all", "line_number": 32, "usage_type": "call"}, {"api_name": "models.Article.query", "line_number": 32, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 32, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 40, "usage_type": "name"}, {"api_name": "models.Article", "line_number": 45, "usage_type": "call"}, {"api_name": "models.db.session.add", "line_number": 46, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 46, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 46, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 47, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 47, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 47, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 51, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 51, "usage_type": "name"}, {"api_name": "models.Article.query.filter_by", "line_number": 57, "usage_type": "call"}, {"api_name": "models.Article.query", "line_number": 57, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 57, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 63, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 63, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 63, "usage_type": "name"}, {"api_name": "flask.request.get_json", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.request", "line_number": 67, "usage_type": "name"}, {"api_name": "models.Article.query.filter_by", "line_number": 69, "usage_type": "call"}, {"api_name": "models.Article.query", "line_number": 69, "usage_type": "attribute"}, {"api_name": "models.Article", "line_number": 69, "usage_type": "name"}, {"api_name": "models.db.session.delete", "line_number": 72, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 72, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 72, "usage_type": "name"}, {"api_name": "models.db.session.commit", "line_number": 73, "usage_type": "call"}, {"api_name": "models.db.session", "line_number": 73, "usage_type": "attribute"}, {"api_name": "models.db", "line_number": 73, "usage_type": "name"}, {"api_name": "models.db.create_all", "line_number": 81, "usage_type": "call"}, {"api_name": "models.db", "line_number": 81, "usage_type": "name"}]} +{"seq_id": 
"17125018016", "text": "# -*- coding: utf-8 -*-\n# @Author : Virace\n# @Email : Virace@aliyun.com\n# @Site : x-item.com\n# @Software: Pycharm\n# @Create : 2022/8/26 14:11\n# @Update : 2023/3/9 17:11\n# @Detail : 描述\n\nimport gc\nimport json\nimport os\nimport re\nimport sys\nfrom collections import defaultdict\nfrom typing import Dict, List\n\nimport lol_voice\nfrom loguru import logger\nfrom lol_voice.formats import BIN, StringHash, WAD\n\nfrom Data.Manifest import GameData\nfrom Utils.common import de_duplication, makedirs, tree\nfrom config import GAME_CHAMPION_PATH, GAME_MAPS_PATH, GAME_REGION, HASH_PATH, LOG_PATH\n\nEVENT_HASH_PATH = os.path.join(HASH_PATH, 'event')\nE2A_HASH_PATH = os.path.join(HASH_PATH, 'event2audio')\nmakedirs(EVENT_HASH_PATH)\nmakedirs(E2A_HASH_PATH)\n\n# 游戏数据相关\ngame_data = GameData()\ngame_data_default = GameData('en_us')\n\n\ndef file_classify(b, region) -> tree:\n \"\"\"\n 分类, 区分事件和资源文件\n :param b: 好几层的dict\n :param region:\n :return:\n \"\"\"\n\n def check_path(paths):\n for p in paths:\n p = p.lower()\n if '_sfx_' in p:\n return 'SFX'\n elif '_vo_' in p:\n return 'VO'\n elif 'mus_' in p:\n return 'MUSIC'\n return 'SFX'\n\n region = region.lower()\n\n for kind in b:\n for name in b[kind]:\n for skin in b[kind][name]:\n items = b[kind][name][skin]\n this = defaultdict(list)\n for item in items:\n if len(item) == 1:\n continue\n _type = check_path(item)\n\n events = ''\n audio = []\n for path in item:\n # 哈希表的路径是无所谓大小写(因为最后计算还是按小写+)\n path = path.lower().replace('en_us', region)\n if 'events' in path:\n events = path\n elif 'audio' in path:\n audio.append(path)\n this[_type].append({'events': events, 'audio': audio})\n b[kind][name][skin] = this\n return b\n\n\ndef get_bin_hashes(update=False) -> Dict:\n \"\"\"\n 穷举皮肤ID, 0~100, 取出bin哈希表\n 这个哈希表是用来从wad中提取bin文件用的。\n 所以 就算 实际皮肤ID不存在也无所谓。\n :param update: 强制更新\n :return:\n \"\"\"\n target = os.path.join(HASH_PATH, 'bin.json')\n if os.path.exists(target) and not update:\n return json.load(open(target, encoding='utf-8'))\n\n # map是整理好的, 几乎没见过更新位置, 所以写死了\n # 如果有新的就直接调用以下 WAD.get_hash(小写路径) 就行了\n result = {\n \"characters\": {},\n \"maps\": {\n \"common\": {\n \"15714053217970310635\": \"data/maps/shipping/common/common.bin\"\n },\n \"map11\": {\"4648248922051545971\": \"data/maps/shipping/map11/map11.bin\"},\n \"map12\": {\"10561014283630087560\": \"data/maps/shipping/map12/map12.bin\"},\n \"map21\": {\"15820477637625025279\": \"data/maps/shipping/map21/map21.bin\"},\n \"map22\": {\"2513799657867357310\": \"data/maps/shipping/map22/map22.bin\"}\n }}\n champion_list = game_data.get_champions_name()\n tpl = 'data/characters/{}/skins/skin{}.bin'\n\n for item in champion_list.keys():\n if item == 'none':\n continue\n\n # 循环0 到100, 是skin的编号\n result['characters'].update(\n {item: {WAD.get_hash(tpl.format(item, i)): tpl.format(item, i) for i in range(101)}})\n\n with open(target, 'w+') as f:\n json.dump(result, f)\n return result\n\n\ndef get_bnk_hashes(update=False) -> tree:\n \"\"\"\n 从bin文件中取出实际调用的音频文件列表\n regin不需要实际安装,比如获取其他语言的哈希表,不需要实际安装外服\n\n :param update: 是否强制更新所有已知哈希表\n :return: 一个tree结构, 就是一个分好类的json\n \"\"\"\n\n target = os.path.join(HASH_PATH, f'bnk.{GAME_REGION}.json')\n if os.path.exists(target) and not update:\n res = json.load(open(target, encoding='utf-8'))\n else:\n bin_hash = get_bin_hashes(update)\n\n res = tree()\n for kind, parts in bin_hash.items():\n # companions为云顶小英雄特效音, 英雄的bin文件中没有事件信息,应该在其他bin里面\n # 但是音频音效都是重复的,也没多大关系,这里就跳过了\n if kind == 'companions':\n continue\n for name, bins in 
parts.items():\n\n if kind == 'characters':\n wad_file = os.path.join(GAME_CHAMPION_PATH, f'{name.capitalize()}.wad.client')\n elif kind == 'maps':\n wad_file = os.path.join(GAME_MAPS_PATH, f'{name.capitalize()}.wad.client')\n else:\n wad_file = os.path.join(GAME_MAPS_PATH, 'Map22.wad.client')\n\n bin_paths = list(bins.values())\n ids = [os.path.splitext(os.path.basename(item))[0] for item in bin_paths]\n # extract 函数使用 list[path]作为参数, 可保证返回顺序\n raw_bins = WAD(wad_file).extract(bin_paths, raw=True)\n\n bs = []\n temp = set()\n for _id, raw in zip(ids, raw_bins):\n if not raw:\n continue\n # 解析Bin文件\n b = BIN(raw)\n # 音频文件列表\n p = b.audio_files\n # 去重\n temp, fs = de_duplication(temp, p)\n if fs:\n bs.append(b)\n res[kind][name][_id] = list(fs)\n else:\n if p:\n bs.append(b)\n del raw_bins\n if bs:\n get_event_hashes(kind, name, bs, True)\n\n # 这里其实不返回值也可以, 浅拷贝修改\n res = file_classify(res, GAME_REGION)\n with open(target, 'w+', encoding='utf-8') as f:\n json.dump(res, f)\n return res\n\n\ndef get_event_hashes(kind, name, bin_datas: List[BIN] = None, update=False) -> List:\n \"\"\"\n 根据bin文件获取事件哈希表\n :param kind:\n :param name:\n :param bin_datas: BIN对象列表,\n :param update:\n :return:\n \"\"\"\n target = os.path.join(HASH_PATH, 'event', kind, f'{name}.json')\n if os.path.exists(target) and not update:\n res = BIN.load_hash_table(target)\n else:\n res = set()\n for bin_data in bin_datas:\n if len(bin_data.hash_tables) == 0:\n continue\n t = bin_data.hash_tables\n res.update(t)\n\n res = list(res)\n if res:\n makedirs(os.path.dirname(target))\n with open(target, 'w+', encoding='utf-8') as f:\n json.dump(res, f, cls=StringHash.dump_cls())\n del bin_datas\n return res\n\n\ndef get_audio_hashes(items, wad_file, event_hashes, _type, kind, name, skin, update=False) -> None:\n \"\"\"\n 根据提供的信息生成事件ID与音频ID的哈希表\n :param items: 由bin_to_data返回的数据, 格式如下\n {\n \"events\":\n \"assets/sounds/wwise2016/vo/zh_cn/characters/aatrox/skins/base/aatrox_base_vo_events.bnk\",\n \"audio\":\n [\"assets/sounds/wwise2016/vo/zh_cn/characters/aatrox/skins/base/aatrox_base_vo_audio.bnk\",\n \"assets/sounds/wwise2016/vo/zh_cn/characters/aatrox/skins/base/aatrox_base_vo_audio.wpk\"]\n }\n :param wad_file: wad文件\n :param event_hashes: get_event_hashes 返回\n :param _type: 音频类型, VO/SFX/MUSIC\n :param kind: 音频类型, characters/companions/maps\n :param name: 英雄或地图名字\n :param skin: 皮肤或地图\n :param update: 是否强制更新\n :return:\n \"\"\"\n func_name = sys._getframe().f_code.co_name\n _log_file = os.path.join(LOG_PATH, f'{func_name}.{GAME_REGION}.log')\n warn_item = []\n\n def tt(value):\n temp = False\n if isinstance(value, list):\n for t in value:\n temp = temp or t\n return bool(temp)\n return bool(value)\n\n region = re.compile(r'\\w{2}_\\w{2}').search(wad_file)\n if region:\n region = region.group()\n else:\n region = 'Default'\n target = os.path.join(HASH_PATH, 'event2audio', region, _type, kind, name,\n f'{skin}.json')\n if os.path.exists(target) and not update:\n # 可以直接pass 这里json加载用来校验文件是否正常\n # d = json.load(open(target, encoding='utf-8'))\n # del d\n # gc.collect()\n pass\n\n else:\n res = tree()\n relative_wad_path = 'Game' + wad_file.split('Game')[-1].replace('\\\\', '/')\n for item in items:\n if not item['events']:\n logger.info(f'无事件文件: {kind}, {name}, {skin}, {_type}')\n return\n\n files = [item['events'], *item['audio']]\n data_raw = WAD(wad_file).extract(files, raw=True)\n if not tt(data_raw):\n warn_item.append((wad_file, item[\"events\"]))\n logger.trace(f'WAD无文件解包: {wad_file}, {name}, {skin}, {_type}, 
{item[\"events\"]}')\n continue\n\n # 事件就一个,音频可能有多个,一般是两个\n event_raw, *audio_raw = data_raw\n try:\n event_hash = lol_voice.get_event_hashtable(event_hashes, event_raw)\n except KeyError:\n # characters, zyra, skin2, SFX, 这个bnk文件events和audio是相反的\n if len(audio_raw) > 1:\n raise ValueError(f'未知错误, {kind}, {name}, {skin}, {_type}')\n event_hash = lol_voice.get_event_hashtable(event_hashes, audio_raw[0])\n audio_raw = [event_raw]\n\n for raw in audio_raw:\n audio_hash = lol_voice.get_audio_hashtable(event_hash, raw)\n if audio_hash:\n # log.info(f'to_audio_hashtable, {kind}, {name}, {skin}, {_type}')\n res['data'][item['audio'][audio_raw.index(raw)]] = audio_hash\n del event_raw\n del data_raw\n del audio_raw\n\n if res:\n path = os.path.dirname(target)\n\n makedirs(path)\n res['info'] = {\n 'kind': kind,\n 'name': name,\n 'detail': skin,\n 'type': _type,\n 'wad': relative_wad_path\n }\n with open(target, 'w+', encoding='utf-8') as f:\n json.dump(res, f)\n del res\n gc.collect()\n # log.info(f'to_audio_hashtable: {kind}, {name}, {skin}, {_type}')\n\n with open(_log_file, 'a+', encoding='utf-8') as f:\n for item in warn_item:\n f.write(f'{item}\\n')\n\n", "repo_name": "Virace/lol_extract_voice", "sub_path": "Hashes/__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 11122, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 15, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.join", "line_number": 26, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 26, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 26, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 27, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 27, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 27, "usage_type": "attribute"}, {"api_name": "Utils.common.makedirs", "line_number": 28, "usage_type": "call"}, {"api_name": "Utils.common.makedirs", "line_number": 29, "usage_type": "call"}, {"api_name": "Data.Manifest.GameData", "line_number": 32, "usage_type": "call"}, {"api_name": "Data.Manifest.GameData", "line_number": 33, "usage_type": "call"}, {"api_name": "collections.defaultdict", "line_number": 61, "usage_type": "call"}, {"api_name": "Utils.common.tree", "line_number": 36, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 89, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 90, "usage_type": "call"}, {"api_name": "os.path", "line_number": 90, "usage_type": "attribute"}, {"api_name": "json.load", "line_number": 91, "usage_type": "call"}, {"api_name": "lol_voice.formats.WAD.get_hash", "line_number": 115, "usage_type": "call"}, {"api_name": "lol_voice.formats.WAD", "line_number": 115, "usage_type": "name"}, {"api_name": "json.dump", "line_number": 118, "usage_type": "call"}, {"api_name": "typing.Dict", "line_number": 81, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 131, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 131, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 131, "usage_type": "attribute"}, {"api_name": "config.GAME_REGION", "line_number": 131, "usage_type": "name"}, {"api_name": "os.path.exists", "line_number": 132, "usage_type": "call"}, {"api_name": "os.path", "line_number": 132, "usage_type": "attribute"}, 
{"api_name": "json.load", "line_number": 133, "usage_type": "call"}, {"api_name": "Utils.common.tree", "line_number": 137, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 146, "usage_type": "call"}, {"api_name": "config.GAME_CHAMPION_PATH", "line_number": 146, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 146, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 148, "usage_type": "call"}, {"api_name": "config.GAME_MAPS_PATH", "line_number": 148, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 148, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 150, "usage_type": "call"}, {"api_name": "config.GAME_MAPS_PATH", "line_number": 150, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 150, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 153, "usage_type": "call"}, {"api_name": "os.path", "line_number": 153, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 153, "usage_type": "call"}, {"api_name": "lol_voice.formats.WAD", "line_number": 155, "usage_type": "call"}, {"api_name": "lol_voice.formats.BIN", "line_number": 163, "usage_type": "call"}, {"api_name": "Utils.common.de_duplication", "line_number": 167, "usage_type": "call"}, {"api_name": "config.GAME_REGION", "line_number": 179, "usage_type": "argument"}, {"api_name": "json.dump", "line_number": 181, "usage_type": "call"}, {"api_name": "Utils.common.tree", "line_number": 122, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 185, "usage_type": "name"}, {"api_name": "lol_voice.formats.BIN", "line_number": 185, "usage_type": "name"}, {"api_name": "os.path.join", "line_number": 194, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 194, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 194, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 195, "usage_type": "call"}, {"api_name": "os.path", "line_number": 195, "usage_type": "attribute"}, {"api_name": "lol_voice.formats.BIN.load_hash_table", "line_number": 196, "usage_type": "call"}, {"api_name": "lol_voice.formats.BIN", "line_number": 196, "usage_type": "name"}, {"api_name": "Utils.common.makedirs", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 207, "usage_type": "call"}, {"api_name": "os.path", "line_number": 207, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 209, "usage_type": "call"}, {"api_name": "lol_voice.formats.StringHash.dump_cls", "line_number": 209, "usage_type": "call"}, {"api_name": "lol_voice.formats.StringHash", "line_number": 209, "usage_type": "name"}, {"api_name": "sys._getframe", "line_number": 234, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 235, "usage_type": "call"}, {"api_name": "config.LOG_PATH", "line_number": 235, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 235, "usage_type": "attribute"}, {"api_name": "config.GAME_REGION", "line_number": 235, "usage_type": "name"}, {"api_name": "re.compile", "line_number": 246, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 251, "usage_type": "call"}, {"api_name": "config.HASH_PATH", "line_number": 251, "usage_type": "argument"}, {"api_name": "os.path", "line_number": 251, "usage_type": "attribute"}, {"api_name": "os.path.exists", "line_number": 253, "usage_type": "call"}, {"api_name": "os.path", "line_number": 253, "usage_type": "attribute"}, {"api_name": 
"Utils.common.tree", "line_number": 261, "usage_type": "call"}, {"api_name": "loguru.logger.info", "line_number": 265, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 265, "usage_type": "name"}, {"api_name": "lol_voice.formats.WAD", "line_number": 269, "usage_type": "call"}, {"api_name": "loguru.logger.trace", "line_number": 272, "usage_type": "call"}, {"api_name": "loguru.logger", "line_number": 272, "usage_type": "name"}, {"api_name": "lol_voice.get_event_hashtable", "line_number": 278, "usage_type": "call"}, {"api_name": "lol_voice.get_event_hashtable", "line_number": 283, "usage_type": "call"}, {"api_name": "lol_voice.get_audio_hashtable", "line_number": 287, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 296, "usage_type": "call"}, {"api_name": "os.path", "line_number": 296, "usage_type": "attribute"}, {"api_name": "Utils.common.makedirs", "line_number": 298, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 307, "usage_type": "call"}, {"api_name": "gc.collect", "line_number": 309, "usage_type": "call"}]} +{"seq_id": "42409250775", "text": "import datetime\n\n\ndef parse_dict(data: dict, path_list=None, res=None) -> list:\n \"\"\"\n Recursive function which makes iris globals compatible data structure from dict\n\n :param data: dict, required field\n :param path_list:\n :param res:\n :return: list\n \"\"\"\n if res is None:\n res = []\n if path_list is None:\n path_list = []\n\n new_data = {}\n if isinstance(data, list):\n for i, item in enumerate(data):\n new_data[i] = item\n else:\n new_data = data\n\n if isinstance(new_data, dict):\n for key, value in new_data.items():\n\n if isinstance(value, list):\n i = 0\n for el in value:\n path_part = [key, i]\n if isinstance(el, dict):\n parse_dict(el, path_list + path_part, res)\n else:\n if isinstance(el, (datetime.date, datetime.datetime)):\n el = el.isoformat()\n res.append({\n \"path_list\": path_list + path_part,\n \"value\": el\n })\n i += 1\n\n elif isinstance(value, dict):\n mod_path = path_list + [key]\n parse_dict(value, mod_path, res)\n\n else:\n mod_path = path_list + [key]\n if isinstance(value, (datetime.date, datetime.datetime)):\n value = value.isoformat()\n res.append({\n \"path_list\": mod_path,\n \"value\": value\n })\n return res\n", "repo_name": "danoleg/mongo-to-iris-migration", "sub_path": "app/helpers/parser.py", "file_name": "parser.py", "file_ext": "py", "file_size_in_byte": 1655, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "77", "api": [{"api_name": "datetime.date", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 35, "usage_type": "attribute"}, {"api_name": "datetime.date", "line_number": 49, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 49, "usage_type": "attribute"}]} +{"seq_id": "40570813369", "text": "import altair_saver\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nimport pandas as pd\nimport glob\n\nfrom tools.paperv2.utils import *\n\nSCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))\n\nRESULTS_DIR = \"/sdb/paperv2_results/\"\n\n\ndef rand_jitter(arr):\n stdev = .005 * (max(arr) - min(arr))\n return arr + np.random.randn(len(arr)) * stdev\n\n\ndef filter(df, **kwargs):\n bool_index = None\n for key, value in kwargs.items():\n if isinstance(value, list):\n _bool_index = df[key].isin(value)\n else:\n _bool_index = df[key] == value\n if bool_index is None:\n bool_index = _bool_index\n else:\n 
bool_index = bool_index & _bool_index\n return df[bool_index]\n\n\ndef load_dlmc_df(subdir, nthreads=None, bcols=None):\n # assert nthreads or bcols\n\n if nthreads is not None and bcols is not None:\n return pd.read_csv(RESULTS_DIR + '/cache/' + subdir + f'/dlmc_bcols_{bcols}_nthreads_{nthreads}.csv')\n elif nthreads is not None:\n return pd.read_csv(RESULTS_DIR + '/cache/' + subdir + f'/dlmc_nthreads_{nthreads}.csv')\n elif bcols is not None:\n return pd.read_csv(RESULTS_DIR + '/cache/' + subdir + f'/dlmc_bcols_{bcols}.csv')\n else:\n all_files = glob.glob(os.path.join(RESULTS_DIR + 'cache/' + subdir, \"*_per_part.csv\" ))\n return pd.concat((pd.read_csv(f) for f in all_files), axis=0, ignore_index=True)\n\n\ndef create_chart_grid(charts, row_width):\n charts_merged = None\n charts_row = None\n\n col = 0\n for i in range(0, len(charts)):\n col = i % row_width\n if col == 0:\n charts_merged = charts_row if charts_merged is None else charts_merged & charts_row\n charts_row = None\n\n charts_row = charts[i] if charts_row is None else charts_row | charts[i]\n\n if col:\n charts_merged = charts_row if charts_merged is None else charts_merged & charts_row\n return charts_merged\n\n\ndef chart_save(chart, filename):\n filepath = PLOTS_DIR + filename\n filepath = filepath.replace(\".png\", \"\") + \".png\"\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n altair_saver.save(chart, filepath, fmt=\"png\", scale_fator=4)\n\n\ndef plot_save(filename):\n filepath = PLOTS_DIR + filename\n filepath = filepath.replace(\".pdf\", \"\") + \".pdf\"\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n plt.savefig(filepath)\n", "repo_name": "SpRegTiling/sparse-register-tiling", "sub_path": "tools/paperv2/plot_utils.py", "file_name": "plot_utils.py", "file_ext": "py", "file_size_in_byte": 2408, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 7, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.dirname", "line_number": 10, "usage_type": "call"}, {"api_name": "os.path", "line_number": 10, "usage_type": "attribute"}, {"api_name": "os.path.realpath", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.random.randn", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 38, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 42, "usage_type": "call"}, {"api_name": "glob.glob", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 44, "usage_type": "call"}, {"api_name": "os.path", "line_number": 44, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 45, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "altair_saver.save", "line_number": 70, "usage_type": "call"}, {"api_name": "os.makedirs", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 76, "usage_type": "call"}, {"api_name": "os.path", "line_number": 76, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 77, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 77, "usage_type": "name"}]} 
+{"seq_id": "73003114487", "text": "from datetime import datetime\nfrom django.db import models\n\nclass TimeStampedModel(models.Model):\n created_at = models.DateTimeField(auto_now_add=True)\n modified_at = models.DateTimeField(auto_now=True)\n\n class Meta:\n abstract = True\n\nclass Rental(TimeStampedModel):\n name = models.CharField(max_length=30)\n\nclass Reservation(TimeStampedModel):\n rental = models.ForeignKey(Rental, on_delete=models.CASCADE, null=False, blank=False)\n checkin = models.DateField()\n checkout = models.DateField()", "repo_name": "qnx-dev/rentalproject", "sub_path": "base/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 520, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "django.db.models.Model", "line_number": 4, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 4, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 5, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 5, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 6, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 6, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 15, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 15, "usage_type": "name"}, {"api_name": "django.db.models.CASCADE", "line_number": 15, "usage_type": "attribute"}, {"api_name": "django.db.models.DateField", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 16, "usage_type": "name"}, {"api_name": "django.db.models.DateField", "line_number": 17, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 17, "usage_type": "name"}]} +{"seq_id": "35906880465", "text": " # -*- coding: utf-8 -*-\nfrom django.shortcuts import render\n\nARABIZI_CHARS_AR = u\"ا ب ت ث ج ح خ د ذ ر ز س ش ص د ط ظ ع غ ف ق ك ل م ن ه و ي\".split()\nARABIZI_CHARS_EN = u\"a b t th j 7 `7 d th r z s sh 9 `9 6 `6 3 `3 f \\\"9 k l m n h w y\".split()\nARABIZI_CHARS = dict(zip(ARABIZI_CHARS_AR, ARABIZI_CHARS_EN))\n\ndef convert_arabizi(s):\n return u\"\".join([ARABIZI_CHARS.get(x,x) for x in s])\n\ndef arabizi(request, word=None):\n word = word or request.GET.get(\"w\")\n return render(\n request,\n \"arabizi.html\",\n {\n \"word\": word,\n \"aword\": word and convert_arabizi(word),\n },\n )\n\n# Create your views here.\ndef hello_world(request, username=None):\n return render(\n request,\n \"hello_world.html\",\n {\n \"name\": username,\n },\n )\n\n\n", "repo_name": "koutbo6/djc_1stproj", "sub_path": "myfirstapp/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 846, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.shortcuts.render", "line_number": 13, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 24, "usage_type": "call"}]} +{"seq_id": "6701117449", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nCreated on 2023-03-30\n\nUpdated on 2023-07-17\n\n@author: Laurens P. 
Stoop\n\"\"\"\n\n\n# load the dependencies\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport xarray as xr\nimport sys\nfrom datetime import datetime\nimport matplotlib.dates as mdates\nimport matplotlib.transforms as mtransforms\n\n# The scripts\nsys.path.append('/Users/3986209/Library/CloudStorage/OneDrive-UniversiteitUtrecht/Projects/ccmetrics/src/')\n\n\n#%% \nimport matplotlib.pylab as pylab\nparams = {'legend.fontsize': 'xx-large',\n 'figure.figsize': (15, 5),\n 'axes.labelsize': 'xx-large',\n 'axes.titlesize':'xx-large',\n 'xtick.labelsize':'xx-large',\n 'ytick.labelsize':'xx-large'}\npylab.rcParams.update(params)\n\n\n\n## colour definitions\n\n# Solar\ncolour_solar = 'burlywood' # 0.03\ncolour_solar_clim = 'orange' # 1\ncolour_solar_hrw = 'tab:red' # 0.7\n\n# Wind\ncolour_wind = 'skyblue' # 0.03\ncolour_wind_clim = 'lightsteelblue' # 1\ncolour_wind_hrw = 'dodgerblue' # 0.7\n\n\n\n\n# Region selection\nREGION = 'NL01'\n# REGION = 'SK00'\n# REGION = 'SE02' \n# REGION = 'FR10'\n\n\n#%%\n# =============================================================================\n# Define file locations\n# =============================================================================\n\n# Define some folders\nFOLDER_drive='/Users/3986209/Library/CloudStorage/OneDrive-UniversiteitUtrecht/'\nFOLDER_project=FOLDER_drive+'Projects/ccmetrics/'\nFOLDER_pecd = FOLDER_drive+'Data/PECD/HIST/ENER/'\n\n# file name\nfileName_SPV = 'SPV/PEON/H_ERA5_ECMW_T639_SPV_0000m_Pecd_PEON_S198001010000_E202112312300_CFR_TIM_01h_NA-_noc_org_NA_NA---_NA---_PhM01.csv'\nfileName_WON = 'WON/PEON/H_ERA5_ECMW_T639_WON_NA---_Pecd_PEON_S198001010000_E202112312300_CFR_TIM_01h_NA-_noc_org_30_NA---_NA---_PhM01.csv'\n\n\n#%%\n# =============================================================================\n# Get the data to open\n# =============================================================================\n\n# Open the file and set the index as the date\ndf_SPV = pd.read_csv(FOLDER_pecd+fileName_SPV, header=52, parse_dates=True, index_col='Date')\ndf_WON = pd.read_csv(FOLDER_pecd+fileName_WON, header=52, parse_dates=True, index_col='Date')\ndf_SPV.index = df_SPV.index.rename('time')\ndf_WON.index = df_WON.index.rename('time')\n\nds_SPV = df_SPV[REGION].to_xarray()\nds_WON = df_WON[REGION].to_xarray()\n\n\n#%%\n# =============================================================================\n# Fix time\n# =============================================================================\n\n# for easier figures we remove the leap days, see notes in RES-balance functions on how to keep this in\nds_SPV = ds_SPV.sel(time=~((ds_SPV.time.dt.month == 2) & (ds_SPV.time.dt.day == 29)))\nds_WON = ds_WON.sel(time=~((ds_WON.time.dt.month == 2) & (ds_WON.time.dt.day == 29)))\n\n\n\n#%%\n# =============================================================================\n# Figure for climatological behaviour\n# =============================================================================\n\n\n\n\n\n\n\n\n# we start a new figure\n# fig, axes = plt.subplots(nrows=2, ncols=3, figsize=(17,10))\nfig, axes = plt.subplot_mosaic([['a)', 'b)', 'c)'], ['d)', 'e)', 'f)']], figsize=(17,10))\n\n# fix date-format\nfig.autofmt_xdate()\n\n# show year\nds_WON.sel(time=slice(\"2002-01-01\", \"2004-06-15\")).plot(ax=axes['a)'], alpha=1, linewidth=0.25, color=colour_wind)\nds_SPV.sel(time=slice(\"2002-01-01\", \"2004-06-15\")).plot(ax=axes['d)'], alpha=1, linewidth=0.25, color=colour_solar)\n\n# Show 
subseasonal\nds_WON.sel(time=slice(\"2003-02-15\", \"2003-05-15\")).plot(ax=axes['b)'], alpha=1, linewidth=0.5, color=colour_wind)\nds_SPV.sel(time=slice(\"2003-02-15\", \"2003-05-15\")).plot(ax=axes['e)'], alpha=1, linewidth=0.5, color=colour_solar)\n\n# show diurnal\nds_WON.sel(time=slice(\"2003-04-03\", \"2003-04-10\")).plot(ax=axes['c)'], alpha=1, color=colour_wind)\nds_SPV.sel(time=slice(\"2003-04-03\", \"2003-04-10\")).plot(ax=axes['f)'], alpha=1, color=colour_solar)\n\n\n### Fix limits\naxes['a)'].set_ylim(0,0.92)\naxes['b)'].set_ylim(0,0.92)\naxes['c)'].set_ylim(0,0.92)\naxes['d)'].set_ylim(0,0.8)\naxes['e)'].set_ylim(0,0.8)\naxes['f)'].set_ylim(0,0.8)\n\n\n## format the date-axis \n# years\nfor a in ['a)', 'd)']:\n axes[a].xaxis.set_major_locator(mdates.YearLocator(base=1))\n axes[a].xaxis.set_minor_locator(mdates.MonthLocator(bymonth=(1,4,7,10)))\n axes[a].xaxis.set_major_formatter(mdates.DateFormatter('%Y'))\n \n# months\nfor a in ['b)', 'e)']:\n axes[a].xaxis.set_major_locator(mdates.MonthLocator(bymonth=(3,4,5)))\n axes[a].xaxis.set_minor_locator(mdates.DayLocator(interval=3))\n axes[a].xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m'))\n\n# Dates\nfor a in ['c)', 'f)']:\n axes[a].xaxis.set_major_locator(mdates.DayLocator(interval=3))\n axes[a].xaxis.set_minor_locator(mdates.DayLocator(interval=1))\n axes[a].xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))\n\n### Fix labels\n# y-label\naxes['a)'].set_ylabel('Wind potential [0-1]')\naxes['d)'].set_ylabel('Solar potential [0-1]')\naxes['b)'].set_ylabel('')\naxes['e)'].set_ylabel('')\naxes['c)'].set_ylabel('')\naxes['f)'].set_ylabel('')\n\n# x-label\naxes['a)'].set_xlabel('')\naxes['b)'].set_xlabel('')\naxes['c)'].set_xlabel('')\naxes['d)'].set_xlabel('')\naxes['e)'].set_xlabel('')\naxes['f)'].set_xlabel('')\n\n# make it look better\nplt.tight_layout()\n\n# print subplot names\nfor label, ax in axes.items():\n # label physical distance in and down:\n trans = mtransforms.ScaledTranslation(10/72, -8/72, fig.dpi_scale_trans)\n ax.text(0.0, 1.0, label, transform=ax.transAxes + trans,\n fontsize='xx-large', verticalalignment='top')\n\n\nif REGION == 'NL01':\n plt.savefig(FOLDER_project+'results/publication/Climatological_Behaviour.png')\n plt.savefig(FOLDER_project+'results/publication/Climatological_Behaviour.pdf')\nelse:\n plt.savefig(FOLDER_project+'results/additional_regions/Climatological_Behaviour_'+REGION+'.png')\n plt.savefig(FOLDER_project+'results/additional_regions/Climatological_Behaviour_'+REGION+'.pdf')\nplt.show()\n", "repo_name": "laurensstoop/CREDI", "sub_path": "src/Figure_climatological_behaviour.py", "file_name": "Figure_climatological_behaviour.py", "file_ext": "py", "file_size_in_byte": 6053, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.path.append", "line_number": 24, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 24, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab.rcParams.update", "line_number": 35, "usage_type": "call"}, {"api_name": "matplotlib.pylab.rcParams", "line_number": 35, "usage_type": "attribute"}, {"api_name": "matplotlib.pylab", "line_number": 35, "usage_type": "name"}, {"api_name": "pandas.read_csv", "line_number": 82, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.subplot_mosaic", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, 
"usage_type": "name"}, {"api_name": "matplotlib.dates.YearLocator", "line_number": 146, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 146, "usage_type": "name"}, {"api_name": "matplotlib.dates.MonthLocator", "line_number": 147, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 147, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 148, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 148, "usage_type": "name"}, {"api_name": "matplotlib.dates.MonthLocator", "line_number": 152, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 152, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 153, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 153, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 154, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 154, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 158, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 158, "usage_type": "name"}, {"api_name": "matplotlib.dates.DayLocator", "line_number": 159, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 159, "usage_type": "name"}, {"api_name": "matplotlib.dates.DateFormatter", "line_number": 160, "usage_type": "call"}, {"api_name": "matplotlib.dates", "line_number": 160, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.tight_layout", "line_number": 180, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 180, "usage_type": "name"}, {"api_name": "matplotlib.transforms.ScaledTranslation", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.transforms", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 191, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 191, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 192, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 192, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 194, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}]} +{"seq_id": "36187052188", "text": "import unittest\nimport sys\nsys.path.append('classifier/')\nsys.path.append('data/')\nimport s1_proximity\nimport data\n\nclass TestUnitProximityTransitions(unittest.TestCase):\n\n def setUp(self):\n self.data = data.Data()\n self.proximity = s1_proximity.ProximityState()\n\n def test_transition_19(self):\n self.data.set_proximity(19)\n ret = self.proximity.handle()\n\n self.assertTrue(ret == \"Inductive\", \"The expected transition was Inductive, we got: \" + ret)\n\n def test_transition_20(self):\n self.data.set_proximity(20)\n ret = self.proximity.handle()\n\n self.assertTrue(ret == \"Proximity\", \"The expected transition was Proximity, we got: \" + ret)\n\n def test_transition_10(self):\n self.data.set_proximity(10)\n ret = self.proximity.handle()\n\n self.assertTrue(ret == \"Inductive\", \"The expected transition was Inductive, we got: \" + 
ret)\n\n def test_transition_30(self):\n self.data.set_proximity(30)\n ret = self.proximity.handle()\n\n self.assertTrue(ret == \"Proximity\", \"The expected transition was Proximity, we got: \" + ret)\n\nif __name__ == '__main__':\n unittest.main()\n ", "repo_name": "Seanmullan/type-swipe", "sub_path": "src/RaspberryPi/tests/test_unit_proximity_transitions.py", "file_name": "test_unit_proximity_transitions.py", "file_ext": "py", "file_size_in_byte": 1177, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "77", "api": [{"api_name": "sys.path.append", "line_number": 3, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 3, "usage_type": "attribute"}, {"api_name": "sys.path.append", "line_number": 4, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 4, "usage_type": "attribute"}, {"api_name": "unittest.TestCase", "line_number": 8, "usage_type": "attribute"}, {"api_name": "data.Data", "line_number": 11, "usage_type": "call"}, {"api_name": "s1_proximity.ProximityState", "line_number": 12, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 39, "usage_type": "call"}]} +{"seq_id": "73978390647", "text": "from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\n\"\"\"\nWebscraping project: Getting all listings (with Requests) from Airbnb in India\nResults include: Name of listing, description, old price, original price and star reviews\nSaving results to a CSV file\n\"\"\"\n\n# Airbnb URL to get\nurl = 'https://www.airbnb.com/s/India/homes?adults=1&tab_id=home_tab&refinement_paths%5B%5D=%2Fhomes&query=India' \\\n '&place_id=ChIJkbeSa_BfYzARphNChaFPjNc&flexible_trip_lengths%5B%5D=one_week&price_filter_input_type=0' \\\n '&price_filter_num_nights=5&channel=EXPLORE&search_type=user_map_move&ne_lat=59.139778890987905&ne_lng=93' \\\n '.6332587156628&sw_lat=4.84557715667748&sw_lng=52.15767331460464&zoom=4&zoom_level=4&search_by_map=true'\n\nr = (requests.get(url)).content\n\nsoup = BeautifulSoup(r, 'html.parser')\n\n# Results are written to the empty list houses\nhouses = []\n\n# Loop from page 1 to page 15 of Airbnb\nfor i in range(1, 16):\n\n # Webscraping html\n div = soup.find('div', class_='gh7uyir giajdwt g14v8520 dir dir-ltr')\n div_card = div.find_all('div', class_='g1qv1ctd cb4nyux dir dir-ltr')\n\n for item in div_card:\n\n # Every loop saves the item result to a dictionary\n dict_house = {}\n\n name = item.find('div', class_='t1jojoys dir dir-ltr').string\n description = item.find('span', class_='t6mzqp7 dir dir-ltr').string\n list_price = item.find('span', class_='a8jt5op dir dir-ltr').string\n price = list_price.split()\n original_price = ''\n if len(price) == 7:\n old_price = float(price[1])\n original_price = float(price[6])\n else:\n old_price = float(price[1])\n original_price = old_price\n\n stars = item.find('span', class_='t5eq1io r4a59j5 dir dir-ltr')\n\n if stars is not None:\n stars = ((stars['aria-label']).split())[0]\n else:\n stars = 'No Review'\n\n # Dictionary\n dict_house = {'page': i, 'name': name, 'description': description, 'old_price': old_price,\n 'original_price': original_price, 'stars': stars}\n\n # Adding the dictionary to a list\n houses.append(dict_house)\n\n # Getting the anchor tag that links to the next page\n before_np = soup.find('a', class_='l1j9v1wn c1ytbx3a dir dir-ltr')\n np = before_np\n\n # Verify that a next page exists. 
If not, stop running\n if np is not None:\n cnp = 'https://www.airbnb.com' + np.get('href')\n\n url = str(cnp)\n\n r = (requests.get(url)).content\n\n soup = BeautifulSoup(r, 'html.parser')\n else:\n print('stop')\n\n# Using Pandas to save results to a CSV\ndata_frame = pd.DataFrame.from_dict(houses)\ndata_frame.to_csv('airbnb_stays.csv')\n", "repo_name": "pesseguita/webscraper-airbnb-to-csv", "sub_path": "webscraper-airbnb-to-csv/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2744, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 19, "usage_type": "call"}, {"api_name": "requests.get", "line_number": 72, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 74, "usage_type": "call"}, {"api_name": "pandas.DataFrame.from_dict", "line_number": 79, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 79, "usage_type": "attribute"}]} +{"seq_id": "1081739730", "text": "from microprediction import MicroWriter\nimport numpy as np\nfrom pprint import pprint\nimport matplotlib.pyplot as plt\nimport random \nimport time\nimport warnings\nwarnings.filterwarnings('ignore')\nfrom copulas.multivariate import GaussianMultivariate\nimport pandas as pd\n\n\n# Grab the Github secret \nimport os \nWRITE_KEY = os.environ.get('WRITE_KEY') # <-- You need to add a Github secret\nANIMAL = MicroWriter.animal_from_key(WRITE_KEY) # <-- Your nom de plume \nREPO = 'https://github.com/microprediction/microactors/blob/master/fit.py' # <--- Change your username\nprint('This is '+ANIMAL+' firing up')\n\nSTOP_LOSS = 25 # <--- Governs when we give up on a stream/horizon\n\n# Get historical data, fit a copula, and submit \n\ndef fit_and_sample(lagged_zvalues:[[float]],num:int, copula=None):\n \"\"\" Example of creating a \"sample\" of future values\n \n lagged_zvalues: [ [z1,z2,z3] ] distributed N(0,1) margins, roughly\n copula : Something from https://pypi.org/project/copulas/\n returns: [ [z1, z2, z3] ] representative sample\n\n Swap out this function for whatever you like. \n \"\"\"\n # Remark 1: It's lazy to just sample synthetic data\n # Remark 2: Any multivariate density estimation could go here. 
\n # Remark 3: If you prefer uniform margin, use mw.get_lagged_copulas(name=name, count= 5000) \n #\n # See https://www.microprediction.com/blog/lottery for discussion of this \"game\" \n \n df = pd.DataFrame(data=lagged_zvalues)\n if copula is None:\n copula = GaussianMultivariate() # <--- \n copula.fit(df)\n synthetic = copula.sample(num)\n return synthetic.values.tolist()\n\n\n\nif __name__ == \"__main__\":\n mw = MicroWriter(write_key=WRITE_KEY)\n mw.set_repository(REPO) # Just polite, creates a CODE badge on the leaderboard\n \n NAMES = [ n for n in mw.get_stream_names() if 'z2~' in n or 'z3~' in n ]\n for _ in range(1): \n name = random.choice(NAMES)\n lagged_zvalues = mw.get_lagged_zvalues(name=name, count= 5000)\n if len(lagged_zvalues)>20:\n zvalues = fit_and_sample(lagged_zvalues=lagged_zvalues, num=mw.num_predictions)\n pprint( (name, len(lagged_zvalues), len(zvalues)))\n try:\n for delay in mw.DELAYS:\n res = mw.submit_zvalues(name=name, zvalues=zvalues, delay=delay )\n pprint(res)\n except Exception as e:\n print(e)\n # Quit some stream/horizon combinations where we fare poorly\n mw.cancel_worst_active(stop_loss=STOP_LOSS, num=3)\n", "repo_name": "microprediction/microactors", "sub_path": "fit.py", "file_name": "fit.py", "file_ext": "py", "file_size_in_byte": 2596, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 4, "dataset": "github-code", "pt": "76", "api": [{"api_name": "warnings.filterwarnings", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 15, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 15, "usage_type": "attribute"}, {"api_name": "microprediction.MicroWriter.animal_from_key", "line_number": 16, "usage_type": "call"}, {"api_name": "microprediction.MicroWriter", "line_number": 16, "usage_type": "name"}, {"api_name": "pandas.DataFrame", "line_number": 39, "usage_type": "call"}, {"api_name": "copulas.multivariate.GaussianMultivariate", "line_number": 41, "usage_type": "call"}, {"api_name": "microprediction.MicroWriter", "line_number": 49, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 54, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 58, "usage_type": "call"}, {"api_name": "pprint.pprint", "line_number": 62, "usage_type": "call"}]} +{"seq_id": "2727769494", "text": "import os\nimport json\nimport pandas as pd\n\nimport numpy as np\nfrom datetime import datetime\n\n\ndef dump_json_log(options, train_results, output_directory):\n config = json.load(open(options.config))\n results = {\n 'training': {\n 'trials': train_results,\n 'average_train_auc': np.mean([result['train_auc'] for result in train_results]),\n 'average_valid_auc': np.mean([result['valid_auc'] for result in train_results]),\n 'train_auc_std': np.std([result['train_auc'] for result in train_results]),\n 'valid_auc_std': np.std([result['valid_auc'] for result in train_results]),\n 'average_train_time': np.mean([result['train_time'] for result in train_results])\n },\n 'config': config,\n }\n log_path = os.path.join(output_directory,\n os.path.splitext(os.path.basename(options.config))[0] + '.result.json')\n json.dump(results, open(log_path, 'w'), indent=2)\n\n\ndef dump_json_opt_log(bst_params, trials, output_path):\n\n ret = {\n 'bst_params': bst_params,\n 'trials': trials\n }\n json.dump(ret, open(output_path, 'w'), indent=2)\n\ndef update_result_summary(run_file, options, train_results):\n\n df = pd.DataFrame()\n\n df['datetime'] = [datetime.now().strftime('%Y/%m/%d %H:%M:%S')]\n df['run_file'] = [run_file] # 
os.path.basename(__file__)\n df['config_file'] = [options.config]\n df['average_train_auc'] = [np.mean([result['train_auc'] for result in train_results])]\n df['average_valid_auc'] = [np.mean([result['valid_auc'] for result in train_results])]\n df['train_auc_std'] = [np.std([result['train_auc'] for result in train_results])]\n df['valid_auc_std'] = [np.std([result['valid_auc'] for result in train_results])]\n\n result_file = '../data/output/result_summary.csv'\n if os.path.exists(result_file):\n ret_df = pd.read_csv(result_file)\n ret_df = pd.concat([ret_df, df], axis=0)\n else:\n ret_df = df\n\n ret_df.to_csv(result_file, index=False)\n", "repo_name": "SS1031/kaggle-HomeCreditDefaultRisk", "sub_path": "src/utils.py", "file_name": "utils.py", "file_ext": "py", "file_size_in_byte": 1981, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "76", "api": [{"api_name": "json.load", "line_number": 10, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 14, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 15, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 16, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 17, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 18, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.splitext", "line_number": 23, "usage_type": "call"}, {"api_name": "os.path", "line_number": 23, "usage_type": "attribute"}, {"api_name": "os.path.basename", "line_number": 23, "usage_type": "call"}, {"api_name": "json.dump", "line_number": 24, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 37, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 39, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 39, "usage_type": "name"}, {"api_name": "numpy.mean", "line_number": 42, "usage_type": "call"}, {"api_name": "numpy.mean", "line_number": 43, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 44, "usage_type": "call"}, {"api_name": "numpy.std", "line_number": 45, "usage_type": "call"}, {"api_name": "os.path.exists", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path", "line_number": 48, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 49, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 50, "usage_type": "call"}]} +{"seq_id": "1554283972", "text": "import scrapy\n\n\nclass VetclinicSpider(scrapy.Spider):\n name = 'vetclinic'\n start_urls = ['http://www.findalocalvet.com/']\n\n def parse(self, response):\n cities = response.css('#SideByCity .itemresult a::attr(href)').getall()\n for city in cities:\n link = response.urljoin(city)\n yield scrapy.Request(link, callback=self.parsecity)\n\n\n def parsecity(self, response):\n clinincs = response.css('.org::attr(href)').getall()\n for clinic in clinincs:\n link = response.urljoin(clinic)\n yield scrapy.Request(link, callback=self.parseclinic)\n \n Nextpage = response.css('a.dataheader:contains(\"Next\")::attr(href)').get()\n if Nextpage:\n \n Nextlink = response.urljoin(Nextpage)\n yield scrapy.Request(Nextlink, callback=self.parsecity) \n\n def parseclinic(self, response):\n \n yield {\n \n ' Name ' : response.css('.Results-Header h1::text').get(),\n ' city ' : response.css('.locality::text').get(), \n ' state ' : response.css('.region::text').get(), 
\n ' phone ' : response.css('.Phone::text').get(),\n ' site ' : response.url,\n } ", "repo_name": "Manoj-M-DS/WEB-Scraping", "sub_path": "vetclinic.py", "file_name": "vetclinic.py", "file_ext": "py", "file_size_in_byte": 1272, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "scrapy.Spider", "line_number": 4, "usage_type": "attribute"}, {"api_name": "scrapy.Request", "line_number": 12, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 19, "usage_type": "call"}, {"api_name": "scrapy.Request", "line_number": 25, "usage_type": "call"}]} +{"seq_id": "11885508801", "text": "from __future__ import print_function\n__author__ = 'kmchugg'\n\nimport random\nimport numpy as np\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nfrom random import sample \nfrom matplotlib import pyplot\nimport statistics\nimport collections\n\nC = 0.15/0.05 #maximum value of the target/proposal ratio \naccepted = 0\nrejected = 0\nsample_data = []\np = [0.06,0.06,0.06,0.06,0.06,0.15,0.13,0.14,0.15,0.13,0,0,0,0,0,0,0,0,0,0,] #extra zeros added to account for the size of q\np_plot = [0,0.06,0.06,0.06,0.06,0.06,0.15,0.13,0.14,0.15,0.13,0,0,0,0,0,0,0,0] #for plotting, the first value is taken as zero because the first value doesn't get plotted.\nI = np.ones(20)\nq = 0.05*I\n# print(p)\n# print(q)\n\nfor i in range(10000):\n Y= random.randrange(1,21)\n value_test = C*np.random.rand(1)\n if(value_test <= p[Y-1]/0.05): # the acceptance test\n sample_data.append(Y)\n accepted = accepted + 1\n else:\n rejected = rejected + 1\n \nefficiency = accepted / (accepted + rejected)\nprint('---efficiency---')\nprint(efficiency)\nprint('--calculated---')\nprint(1/C)\n\nprint(sample_data)\nvalue = []\nfreqList = (collections.Counter(sample_data)) #the dictionary that stores the frequency of occurrence of numbers\nprint(freqList)\ndenom = (len(sample_data))\nfor i in range(1, (len(freqList)+1)):\n value.append(freqList[i]/denom) #calculating the probabilities\nprint('---value---')\nprint(value)\n\nx = [1,2,3,4,5,6,7,8,9,10]\ny = value\nprint('---x---')\nprint(x)\n\n\nplt.figure(figsize=(10, 3))\nplt.bar(x,y,align='center') # A bar chart\nplt.plot(p_plot,\"r-\")\nplt.xlabel('Numbers')\nplt.ylabel('Probabilities')\nplt.show()\n\nsam_mean = statistics.mean(sample_data)\nprint('---sample mean---')\nprint(sam_mean)\nsam_variance = statistics.variance(sample_data)\nprint('---sample variance---')\nprint(sam_variance)\n\n\ntheo_mean = 0\ntheo_var = 0\np_cal = [0.06,0.06,0.06,0.06,0.06,0.15,0.13,0.14,0.15,0.13]\nfor i in range(len(p_cal)):\n theo_mean = theo_mean + (i+1)*p_cal[i]\nfor i in range(len(p_cal)):\n theo_var = theo_var + (p_cal[i]*((i+1) - theo_mean)*((i+1) - theo_mean))\nprint('--theoretical mean---')\nprint(theo_mean)\nprint('--theoretical variance---')\nprint(theo_var)\n\nfig = plt.figure()\nplt.hist(p_plot,30,facecolor='b', alpha=1)\nplt.hist(sample_data,30,facecolor='r', alpha=1)\nplt.show()\n\n", "repo_name": "SahayDivyanshu/Simulation_Stochastic_Systems", "sub_path": "project3/CODE/Q5.py", "file_name": "Q5.py", "file_ext": "py", "file_size_in_byte": 2275, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "numpy.ones", "line_number": 19, "usage_type": "call"}, {"api_name": "random.randrange", "line_number": 25, "usage_type": "call"}, {"api_name": "numpy.random.rand", "line_number": 26, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 26, 
"usage_type": "attribute"}, {"api_name": "collections.Counter", "line_number": 41, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 55, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 55, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.bar", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 56, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 57, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 57, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 58, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 58, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 59, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 59, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 60, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 60, "usage_type": "name"}, {"api_name": "statistics.mean", "line_number": 62, "usage_type": "call"}, {"api_name": "statistics.variance", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 82, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 82, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 83, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 83, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 84, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 84, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 85, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 85, "usage_type": "name"}]} +{"seq_id": "9229260272", "text": "import pygame\n#в данном файле представленные константы для работы программы\n#------------------------------------------------------------\nWIDTH, HEIGHT = 1200, 800 # размеры ОКНА\nFONT = 'EpilepsySansBold.ttf'\nwidth_d, height_d = 240, 160 # размеры происходящего в игре, игрового пространства?, display\nTILE_SIZE = 10\nFPS = 60\nPLAYER_VEL = 1\nBACKGROUND = pygame.transform.scale(pygame.image.load(\"working_sprites/background.png\"), (width_d, height_d))\nBACKGROUND_1 = pygame.image.load('working_sprites/background0.png')\nIMAGE_BULLET = pygame.image.load('working_sprites/bullet.png')\nBG_WIDTH = BACKGROUND.get_width()\nVOLUME_MUSIC = 0.8\nVOLUME_SOUNDS = 0.5\nTILES = 1000\nSCROLL = 0\nSPAWN_ENEMY = pygame.USEREVENT + 1", "repo_name": "sprittzer/shooter", "sub_path": "code/settings.py", "file_name": "settings.py", "file_ext": "py", "file_size_in_byte": 822, "program_lang": "python", "lang": "ru", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "pygame.transform.scale", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.transform", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 10, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 10, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 11, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 11, "usage_type": "attribute"}, {"api_name": "pygame.image.load", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.image", "line_number": 12, "usage_type": "attribute"}, {"api_name": "pygame.USEREVENT", "line_number": 18, "usage_type": 
"attribute"}]} +{"seq_id": "17866368713", "text": "import os\nimport csv\nimport re\nimport nltk\nfrom nltk.corpus import stopwords\nimport pandas as pd\nfrom pandas import DataFrame\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn import metrics\nfrom sklearn.metrics import precision_score, recall_score\nfrom sklearn.preprocessing import MultiLabelBinarizer\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom nltk.stem import WordNetLemmatizer\nimport matplotlib.pyplot as plt\nfrom wordcloud import WordCloud\n\n\n#including DFKI\ndata1 = pd.read_csv(\"DFKI/P07_1001-50.csv\", sep=\"\\t\", index_col=False, encoding='latin-1', low_memory=False)\ndf1 = DataFrame(data1)\ndf1 = df1[df1.label != 'Neutral'] #removes rows with Netral label\nx1 = df1.data\ny1 = df1.label\n\ndata2 = pd.read_csv(\"DFKI/P08_1009-50.csv\", sep=\"\\t\", index_col=False, encoding='latin-1', low_memory=False)\ndf2 = DataFrame(data2)\ndf2 = df2[df2.label != 'Neutral']\nx2 = df2.data\ny2 = df2.label\n\ndata3 = pd.read_csv(\"DFKI/P08_2001-30.csv\", sep=\"\\t\", index_col=False, encoding='latin-1', low_memory=False)\ndf3 = DataFrame(data3)\ndf3 = df3[df3.label != 'Neutral']\nx3 = df3.data\ny3 = df3.label\n\n# including IMS\ndata4 = pd.read_csv(\"corpus2.csv\", sep=\",\", index_col=False, encoding='latin-1', low_memory=False)\ndf4 = DataFrame(data4)\nx4 = df4.data\ny4 = df4.label\n\nframes = [df1, df2, df3, df4]\ndf = pd.concat(frames, ignore_index=True)\n\n# hedging words to list\nwith open('hedges.csv', 'r') as f:\n reader = csv.reader(f)\n hedges = list(reader)\n# hedging (list of list) to list\nhedges_list = [item for sublist in hedges for item in sublist]\nparagraphs = df['data']\n\n# drawing wordcloud\ndef wordcloud_draw(data, color='white'):\n words = ' '.join(data)\n # cleaned_word = \" \".join([word for word in words.split()\n # if 'http' not in word\n # and not word.startswith('@')\n # and not word.startswith('#')\n # and word != 'RT'\n # ])\n wordcloud = WordCloud(background_color=color,\n width=2500,\n height=2000\n ).generate(words)\n plt.figure(1, figsize=(13, 13))\n plt.imshow(wordcloud)\n plt.axis('off')\n plt.show()\n #plt.savefig('word_cloud') #uncomment to save picture\n\n# uncomment next 2 lines to generate pic\n# print(\"Hedging Cues\")\n# wordcloud_draw(hedges_list)\n\n# preprocessing list of paragraphs\n# nltk.download('wordnet')\nlemmatizer = WordNetLemmatizer()\n\ndef lemmatize(word):\n lemmatizer = WordNetLemmatizer()\n return lemmatizer.lemmatize(word=word)\n\n# for sent in paragraphs:\nparagraphs = paragraphs.str.replace('[^a-zA-Z0-9-_*.]', ' ')\npre_para = [\" \".join([lemmatize(word) for word in sentence.split(\" \")]) for sentence in paragraphs]\n\n#replacing list of list to list\n\nlabel_list = df['label'].values.tolist()\n\n# joing lists\ncomplete_list = []\nfor sent1, sent2 in zip(pre_para, label_list):\n complete_list.append([sent1, sent2])\n\n# removing repeated things\ndef unique(seq):\n # order preserving\n checked = []\n for e in seq:\n if e not in checked:\n checked.append(e)\n return checked\n\nunique_list = unique(complete_list)\n\n# getting 179 positives and negatives\nlist_179 = []\nj = 0\nk = 0\nfor i in unique_list:\n if i[1] == 'Negative' and j < 179:\n list_179.append(i)\n j = j + 1\n if i[1] == 'Positive' and k < 179:\n list_179.append(i)\n k = k + 1\n# getting only sentences from list_179\nlist_358 = [item[0] for item in 
list_179]\n\n\n# implementing hedge cue detection\nhedges_bool = []\nfor word in hedges_list:\n for sent in list_358:\n found = False\n found_hedge_false = False\n # if word in sent:\n for i in hedges_bool:\n if i[0] == sent and i[1] == 'hedge_true': # checks if true is repeated\n found = True\n break\n if i[0] == sent and i[1] == 'hedge_false': # checks if false is repeated\n found_hedge_false = True\n if found is True: # if hedge_true\n break\n if sent.find(word) != -1 and found_hedge_false is True: # hedge cue found in a sentence previously marked hedge_false\n hedges_bool.remove([sent, 'hedge_false'])\n hedges_bool.append([sent, 'hedge_true'])\n if found_hedge_false is True: # it can become true in further steps\n continue\n else:\n hedges_bool.append([sent, 'hedge_false'])\n\n# x and y\nx_data = []\ny_data = []\nfor sent1, sent2 in zip(label_list, hedges_bool):\n x_data.append(sent2[0])\n y_data.append([sent1, sent2[1]])\n\n# classification\n\ndef labelEncoding(y_data):\n mlb = MultiLabelBinarizer()\n y_binarized = mlb.fit_transform(y_data)\n return y_binarized\n\n\ndef tfidfVectorizer(x_data):\n stopset = stopwords.words('english')\n vect = TfidfVectorizer(analyzer='word', encoding='utf-8', min_df = 0, ngram_range=(1, 2), lowercase = True, strip_accents='ascii', stop_words = stopset)\n X_vec = vect.fit_transform(x_data)\n return X_vec\n\n\nX_vec = tfidfVectorizer(x_data)\ny_encoded = labelEncoding(y_data)\n\n\ndef splitTestTrain(X_vec, y_encoded):\n X_train, X_test, y_train, y_test = train_test_split(X_vec, y_encoded,\ntest_size=0.2, random_state=0)\n return X_train, X_test, y_train, y_test\n\nX_train, X_test, y_train, y_test = splitTestTrain(X_vec, y_encoded)\n\ndef applyDecisionTreeClassifier(X_train, y_train, X_test, y_test):\n # Model Training: DecisionTreeClassifier\n DT_classifier = DecisionTreeClassifier(random_state=0)\n DT_classifier.fit(X_train, y_train)\n print(DT_classifier)\n model_accuracies = cross_val_score(estimator=DT_classifier,\n X=X_train, y=y_train, cv=10)\n print(\"Model Accuracies Mean\", model_accuracies.mean()*100)\n print(\"Model Accuracies Standard Deviation\", model_accuracies.std()*100)\n # Model Testing: DTs\n y_pred = DT_classifier.predict(X_test)\n metrics.confusion_matrix(y_test.argmax(axis=1), y_pred.argmax(axis=1))\n test_accuracy = metrics.accuracy_score(y_test, y_pred)\n precision_DT = precision_score(y_test, y_pred, average='weighted')\n recall_DT = recall_score(y_test, y_pred, average='weighted')\n f_DT = 2*(precision_DT * recall_DT) / (precision_DT + recall_DT)\n print(\"Decision Tree Classifier Test Accuracy: \", test_accuracy*100)\n print(\"Decision Tree Classifier Test Precision: \", precision_DT*100)\n print(\"Decision Tree Classifier Test Recall: \", recall_DT*100)\n print(\"Decision Tree Classifier Test F measure: \", f_DT*100)\n return test_accuracy, precision_DT, recall_DT, f_DT\n\naccuracy, precision, recall, f1_score = applyDecisionTreeClassifier(X_train, y_train, X_test, y_test)\nprint(accuracy)\n\ndef applyKNeighborsClassifier(X_train, y_train, X_test, y_test):\n # Model Training: KNN\n knn_classifier = KNeighborsClassifier(n_neighbors=3)\n knn_classifier.fit(X_train, y_train)\n print(knn_classifier)\n model_accuracies = cross_val_score(estimator=knn_classifier,\n X=X_train, y=y_train, cv=10)\n print(\"Model Accuracies Mean\", model_accuracies.mean()*100)\n print(\"Model Accuracies Standard Deviation\", model_accuracies.std()*100)\n # Model Testing: Knn\n y_pred = knn_classifier.predict(X_test)\n metrics.confusion_matrix(y_test.argmax(axis=1), 
y_pred.argmax(axis=1))\n test_accuracy = metrics.accuracy_score(y_test, y_pred)\n precision_KNN = precision_score(y_test, y_pred, average='weighted')\n recall_KNN = recall_score(y_test, y_pred, average='weighted')\n f_KNN = 2*(precision_KNN * recall_KNN) / (precision_KNN + recall_KNN)\n print(\"KNNs Test Accuracy: \", test_accuracy*100)\n print(\"KNNs Test Precision: \", precision_KNN*100)\n print(\"KNNs Test Recall: \", recall_KNN*100)\n print(\"KNNs Test F measure: \", f_KNN*100)\n return test_accuracy, precision_KNN, recall_KNN, f_KNN\n\naccuracy, precision, recall, f1_score = applyKNeighborsClassifier(X_train, y_train, X_test, y_test)\nprint(accuracy)", "repo_name": "aatif1/Classification_of_Scientific_Citations", "sub_path": "with_hedging.py", "file_name": "with_hedging.py", "file_ext": "py", "file_size_in_byte": 8123, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 22, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 27, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 28, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 33, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 34, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 40, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 41, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 46, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 50, "usage_type": "call"}, {"api_name": "wordcloud.WordCloud", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.figure", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.imshow", "line_number": 70, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 70, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 71, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 71, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 72, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 72, "usage_type": "name"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 81, "usage_type": "call"}, {"api_name": "nltk.stem.WordNetLemmatizer", "line_number": 84, "usage_type": "call"}, {"api_name": "sklearn.preprocessing.MultiLabelBinarizer", "line_number": 159, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords.words", "line_number": 165, "usage_type": "call"}, {"api_name": "nltk.corpus.stopwords", "line_number": 165, "usage_type": "name"}, {"api_name": "sklearn.feature_extraction.text.TfidfVectorizer", "line_number": 166, "usage_type": "call"}, {"api_name": "sklearn.model_selection.train_test_split", "line_number": 176, "usage_type": "call"}, {"api_name": "sklearn.tree.DecisionTreeClassifier", "line_number": 184, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 187, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 193, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 193, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 194, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 194, 
"usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 195, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 196, "usage_type": "call"}, {"api_name": "sklearn.neighbors.KNeighborsClassifier", "line_number": 209, "usage_type": "call"}, {"api_name": "sklearn.model_selection.cross_val_score", "line_number": 212, "usage_type": "call"}, {"api_name": "sklearn.metrics.confusion_matrix", "line_number": 218, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 218, "usage_type": "name"}, {"api_name": "sklearn.metrics.accuracy_score", "line_number": 219, "usage_type": "call"}, {"api_name": "sklearn.metrics", "line_number": 219, "usage_type": "name"}, {"api_name": "sklearn.metrics.precision_score", "line_number": 220, "usage_type": "call"}, {"api_name": "sklearn.metrics.recall_score", "line_number": 221, "usage_type": "call"}]} +{"seq_id": "32349796768", "text": "import cv2 as cv\nimport numpy as np\nimport torch\nimport os\n\n\ndef distinguish_image(path, high_path, low_path, threshold):\n \"\"\"\n distinguish image by mean pixel value\n :param path: initial images path,type=string\n :param high_path: the path you store the high illumination image,type=string\n :param low_path: the path you store the low illumination image,type=string\n :param threshold: the threshold between low&high illumination image,type=int|float\n :return: None\n \"\"\"\n # path = \"C:/Users/comin/Desktop/highlightdata256/\"\n imgs = os.listdir(path) # images name list under path\n num_low, num_high = 0, 0\n for i in imgs:\n img = cv.imread(path + str(i))\n tensor_cv = torch.from_numpy(img)\n if tensor_cv.float().mean() >= threshold:\n cv.imwrite(high_path + str(i), img)\n num_high += 1\n else:\n cv.imwrite(low_path + str(i), img)\n num_low += 1\n\n print('low illumination numbers:%d,high illumination numbers:%d' % (num_low, num_high))\n\n\n# example\ninit_path = \"C:/Users/comin/Desktop/highlightdata256/\"\nt_high_path = \"C:/Users/comin/Desktop/high/\"\nt_low_paht = \"C:/Users/comin/Desktop/low/\"\nt_threshold = 40\ndistinguish_image(init_path, t_high_path, t_low_paht, t_threshold)\n", "repo_name": "BingGoX/LightPollutionRemovalReasrch", "sub_path": "src/tools/DistinguishImg.py", "file_name": "DistinguishImg.py", "file_ext": "py", "file_size_in_byte": 1280, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.listdir", "line_number": 17, "usage_type": "call"}, {"api_name": "cv2.imread", "line_number": 20, "usage_type": "call"}, {"api_name": "torch.from_numpy", "line_number": 21, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 23, "usage_type": "call"}, {"api_name": "cv2.imwrite", "line_number": 26, "usage_type": "call"}]} +{"seq_id": "45567854073", "text": "from __future__ import print_function\nimport sys\nimport os\nimport tempfile\nimport time\nimport multiprocessing as mp\nimport unittest\nimport random\nimport mxnet as mx\nimport numpy as np\nimport unittest\nimport math\nfrom nose.tools import assert_raises\nfrom mxnet.test_utils import check_consistency, set_default_context, assert_almost_equal\nfrom mxnet.base import MXNetError\nfrom mxnet import autograd\nfrom numpy.testing import assert_allclose\nfrom mxnet.test_utils import rand_ndarray\n\n\ncurr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))\nsys.path.insert(0, os.path.join(curr_path, '../unittest'))\nfrom common import setup_module, with_seed, teardown, 
assert_raises_cudnn_not_satisfied\nfrom test_gluon import *\nfrom test_loss import *\nfrom test_gluon_rnn import *\n\nset_default_context(mx.gpu(0))\n\ndef check_rnn_layer(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n with mx.gpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = mx.nd.ones((10, 16, 30))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n # atol of 1e-6 required, as exposed by seed 2124685726\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\n@with_seed()\ndef check_rnn_layer_w_rand_inputs(layer):\n layer.collect_params().initialize(ctx=[mx.cpu(0), mx.gpu(0)])\n x = mx.nd.uniform(shape=(10, 16, 30))\n with mx.gpu(0):\n x = x.copyto(mx.gpu(0))\n states = layer.begin_state(16)\n go, gs = layer(x, states)\n\n with mx.cpu(0):\n x = x.copyto(mx.cpu(0))\n states = layer.begin_state(16)\n co, cs = layer(x, states)\n\n assert_almost_equal(go.asnumpy(), co.asnumpy(), rtol=1e-2, atol=1e-6)\n for g, c in zip(gs, cs):\n assert_almost_equal(g.asnumpy(), c.asnumpy(), rtol=1e-2, atol=1e-6)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_lstmp():\n hidden_size, projection_size = 3, 2\n rtol, atol = 1e-2, 1e-2\n batch_size, seq_len = 7, 11\n input_size = 5\n lstm_input = mx.nd.uniform(shape=(seq_len, batch_size, input_size), ctx=mx.gpu(0))\n shapes = {'i2h_weight': (hidden_size*4, input_size),\n 'h2h_weight': (hidden_size*4, projection_size),\n 'i2h_bias': (hidden_size*4,),\n 'h2h_bias': (hidden_size*4,),\n 'h2r_weight': (projection_size, hidden_size)}\n weights = {k: rand_ndarray(v) for k, v in shapes.items()}\n lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,\n input_size=input_size, prefix='lstm0_')\n lstm_cell = gluon.contrib.rnn.LSTMPCell(hidden_size=hidden_size,\n projection_size=projection_size,\n input_size=input_size,\n prefix='lstm0_l0_')\n lstm_layer.initialize(ctx=mx.gpu(0))\n lstm_cell.initialize(ctx=mx.gpu(0))\n layer_params = lstm_layer.collect_params()\n cell_params = lstm_cell.collect_params()\n for k, v in weights.items():\n layer_params['lstm0_l0_'+k].set_data(v.copy())\n cell_params['lstm0_l0_'+k].set_data(v.copy())\n with autograd.record():\n layer_output = lstm_layer(lstm_input.copy())\n cell_output = lstm_cell.unroll(seq_len, lstm_input.copy(), layout='TNC',\n merge_outputs=True)[0]\n assert_almost_equal(layer_output.asnumpy(), cell_output.asnumpy(), rtol=rtol, atol=atol)\n layer_output.backward()\n cell_output.backward()\n for k, v in weights.items():\n layer_grad = layer_params['lstm0_l0_'+k].grad()\n cell_grad = cell_params['lstm0_l0_'+k].grad()\n print('checking gradient for {}'.format('lstm0_l0_'+k))\n assert_almost_equal(layer_grad.asnumpy(), cell_grad.asnumpy(),\n rtol=rtol, atol=atol)\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, projection_size=5), mx.nd.ones((8, 3, 20)))\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, projection_size=5, bidirectional=True), mx.nd.ones((8, 3, 20)), [mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))])\n\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, dropout=0.5, projection_size=5), mx.nd.ones((8, 3, 20)),\n run_only=True)\n check_rnn_layer_forward(gluon.rnn.LSTM(10, 2, bidirectional=True, dropout=0.5, projection_size=5),\n mx.nd.ones((8, 3, 20)),\n [mx.nd.ones((4, 3, 5)), mx.nd.ones((4, 3, 10))], 
run_only=True)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_lstm_clip():\n hidden_size, projection_size = 4096, 2048\n batch_size, seq_len = 32, 80\n input_size = 50\n clip_min, clip_max, clip_nan = -5, 5, True\n lstm_input = mx.nd.uniform(shape=(seq_len, batch_size, input_size), ctx=mx.gpu(0))\n lstm_states = [mx.nd.uniform(shape=(2, batch_size, projection_size), ctx=mx.gpu(0)),\n mx.nd.uniform(shape=(2, batch_size, hidden_size), ctx=mx.gpu(0))]\n lstm_layer = gluon.rnn.LSTM(hidden_size, projection_size=projection_size,\n input_size=input_size, prefix='lstm0_',\n bidirectional=True,\n state_clip_min=clip_min,\n state_clip_max=clip_max,\n state_clip_nan=clip_nan)\n lstm_layer.initialize(ctx=mx.gpu(0))\n with autograd.record():\n _, layer_output_states = lstm_layer(lstm_input, lstm_states)\n cell_states = layer_output_states[0].asnumpy()\n assert (cell_states >= clip_min).all() and (cell_states <= clip_max).all()\n assert not np.isnan(cell_states).any()\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_rnn_layer():\n check_rnn_layer(gluon.rnn.RNN(100, num_layers=3))\n check_rnn_layer(gluon.rnn.RNN(100, activation='tanh', num_layers=3))\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3))\n check_rnn_layer(gluon.rnn.GRU(100, num_layers=3))\n\n check_rnn_layer(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))\n check_rnn_layer_w_rand_inputs(gluon.rnn.LSTM(100, num_layers=3, bidirectional=True))\n\n\ndef check_layer_bidirectional(size, in_size, proj_size):\n class RefBiLSTM(gluon.Block):\n def __init__(self, size, proj_size, **kwargs):\n super(RefBiLSTM, self).__init__(**kwargs)\n with self.name_scope():\n self._lstm_fwd = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=False, prefix='l0')\n self._lstm_bwd = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=False, prefix='r0')\n\n def forward(self, inpt):\n fwd = self._lstm_fwd(inpt)\n bwd_inpt = nd.flip(inpt, 0)\n bwd = self._lstm_bwd(bwd_inpt)\n bwd = nd.flip(bwd, 0)\n return nd.concat(fwd, bwd, dim=2)\n weights = {}\n for d in ['l', 'r']:\n weights['lstm_{}0_i2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, in_size))\n if proj_size:\n weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, proj_size))\n weights['lstm_{}0_h2r_weight'.format(d)] = mx.random.uniform(shape=(proj_size, size))\n else:\n weights['lstm_{}0_h2h_weight'.format(d)] = mx.random.uniform(shape=(size*4, size))\n weights['lstm_{}0_i2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))\n weights['lstm_{}0_h2h_bias'.format(d)] = mx.random.uniform(shape=(size*4,))\n\n net = gluon.rnn.LSTM(size, projection_size=proj_size, bidirectional=True, prefix='lstm_')\n ref_net = RefBiLSTM(size, proj_size, prefix='lstm_')\n net.initialize()\n ref_net.initialize()\n net_params = net.collect_params()\n ref_net_params = ref_net.collect_params()\n for k in weights:\n net_params[k].set_data(weights[k])\n ref_net_params[k.replace('l0', 'l0l0').replace('r0', 'r0l0')].set_data(weights[k])\n\n data = mx.random.uniform(shape=(11, 10, in_size))\n assert_allclose(net(data).asnumpy(), ref_net(data).asnumpy())\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_layer_bidirectional():\n check_layer_bidirectional(7, 5, 0)\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='7.2.1')\ndef test_layer_bidirectional_proj():\n check_layer_bidirectional(7, 5, 
3)\n\n\n@with_seed()\n@assert_raises_cudnn_not_satisfied(min_version='5.1.10')\ndef test_rnn_layer_begin_state_type():\n fake_data = nd.random.uniform(shape=(3, 5, 7), dtype='float16')\n modeling_layer = gluon.rnn.LSTM(hidden_size=11, num_layers=2, dropout=0.2, bidirectional=True)\n modeling_layer.cast('float16')\n modeling_layer.initialize()\n modeling_layer(fake_data)\n\n\ndef test_gluon_ctc_consistency():\n loss = mx.gluon.loss.CTCLoss()\n data = mx.nd.arange(0, 4, repeat=40, ctx=mx.gpu(0)).reshape((2,20,4)).flip(axis=0)\n cpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.cpu(0))\n gpu_label = mx.nd.array([[2,1,-1,-1],[3,2,2,-1]], ctx=mx.gpu(0))\n\n cpu_data = data.copy().as_in_context(mx.cpu(0))\n cpu_data.attach_grad()\n with mx.autograd.record():\n l_cpu = loss(cpu_data, cpu_label)\n l_cpu.backward()\n\n gpu_data = data.copyto(mx.gpu(0))\n gpu_data.attach_grad()\n with mx.autograd.record():\n l_gpu = loss(gpu_data, gpu_label)\n l_gpu.backward()\n\n assert_almost_equal(cpu_data.grad.asnumpy(), gpu_data.grad.asnumpy(), atol=1e-3, rtol=1e-3)\n\n\n@with_seed()\ndef test_global_norm_clip_multi_device():\n for check_isfinite in [True, False]:\n x1 = mx.nd.ones((3,3), ctx=mx.gpu(0))\n x2 = mx.nd.ones((4,4), ctx=mx.cpu(0))\n norm = gluon.utils.clip_global_norm([x1, x2], 1.0, check_isfinite=check_isfinite)\n if check_isfinite:\n assert norm == 5.0\n else:\n assert norm.asscalar() == 5.0\n assert_almost_equal(x1.asnumpy(), np.ones((3, 3)) / 5)\n assert_almost_equal(x2.asnumpy(), np.ones((4, 4)) / 5)\n\n\ndef _check_batchnorm_result(input, num_devices=1, cuda=False):\n from mxnet.gluon.utils import split_and_load\n def _find_bn(module):\n if isinstance(module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):\n return module\n elif isinstance(module.module, (mx.gluon.nn.BatchNorm, mx.gluon.contrib.nn.SyncBatchNorm)):\n return module.module\n\n raise RuntimeError('BN not found')\n\n def _syncParameters(bn1, bn2, ctx):\n ctx = input.context\n bn2.gamma.set_data(bn1.gamma.data(ctx))\n bn2.beta.set_data(bn1.beta.data(ctx))\n bn2.running_mean.set_data(bn1.running_mean.data(ctx))\n bn2.running_var.set_data(bn1.running_var.data(ctx))\n\n input1 = input.copy()\n input2 = input.copy()\n\n if cuda:\n input1 = input.as_in_context(mx.gpu(0))\n ctx_list = [mx.gpu(i) for i in range(num_devices)]\n else:\n ctx_list = [mx.cpu(0) for _ in range(num_devices)]\n\n nch = input.shape[1]\n bn1 = mx.gluon.nn.BatchNorm(in_channels=nch)\n bn2 = mx.gluon.contrib.nn.SyncBatchNorm(in_channels=nch, num_devices=num_devices)\n\n bn1.initialize(ctx=ctx_list[0])\n bn2.initialize(ctx=ctx_list)\n\n # using the same values for gamma and beta\n #_syncParameters(_find_bn(bn1), _find_bn(bn2), ctx_list[0])\n\n input1.attach_grad()\n inputs2 = split_and_load(input2, ctx_list, batch_axis=0)\n for xi in inputs2:\n xi.attach_grad()\n\n with mx.autograd.record():\n output1 = bn1(input1)\n output2 = [bn2(xi) for xi in inputs2]\n loss1 = (output1 ** 2).sum()\n loss2 = [(output ** 2).sum() for output in output2]\n mx.autograd.backward(loss1)\n mx.autograd.backward(loss2)\n\n output2 = mx.nd.concat(*[output.as_in_context(input.context) for output in output2], dim=0)\n # assert forwarding\n assert_almost_equal(input1.asnumpy(), input2.asnumpy(), atol=1e-3, rtol=1e-3)\n assert_almost_equal(output1.asnumpy(), output2.asnumpy(), atol=1e-3, rtol=1e-3)\n assert_almost_equal(_find_bn(bn1).running_mean.data(ctx_list[0]).asnumpy(),\n _find_bn(bn2).running_mean.data(ctx_list[0]).asnumpy(),\n atol=1e-3, rtol=1e-3)\n 
assert_almost_equal(_find_bn(bn1).running_var.data(ctx_list[0]).asnumpy(),\n _find_bn(bn2).running_var.data(ctx_list[0]).asnumpy(),\n atol=1e-3, rtol=1e-3)\n input2grad = mx.nd.concat(*[output.grad.as_in_context(input.context) for output in inputs2], dim=0)\n assert_almost_equal(input1.grad.asnumpy(), input2grad.asnumpy(), atol=1e-3, rtol=1e-3)\n\n@with_seed()\ndef test_sync_batchnorm():\n def get_num_devices():\n for i in range(100):\n try:\n mx.nd.zeros((1,), ctx=mx.gpu(i))\n except:\n return i\n # no need to use SyncBN with 1 gpu\n if get_num_devices() < 2:\n return\n ndev = 2\n # check with unsync version\n for i in range(10):\n _check_batchnorm_result(mx.nd.random.uniform(shape=(4, 1, 4, 4)),\n num_devices=ndev, cuda=True)\n\n\n@with_seed()\ndef test_symbol_block_fp16():\n # Test case to verify if initializing the SymbolBlock from a model with params\n # other than fp32 param dtype.\n\n # 1. Load a resnet model, cast it to fp16 and export\n tmp = tempfile.mkdtemp()\n tmpfile = os.path.join(tmp, 'resnet34_fp16')\n ctx = mx.gpu(0)\n\n net_fp32 = mx.gluon.model_zoo.vision.resnet34_v2(pretrained=True, ctx=ctx, root=tmp)\n net_fp32.cast('float16')\n net_fp32.hybridize()\n data = mx.nd.zeros((1,3,224,224), dtype='float16', ctx=ctx)\n net_fp32.forward(data)\n net_fp32.export(tmpfile, 0)\n\n # 2. Load the saved model and verify if all the params are loaded correctly.\n # and choose one of the param to verify the type if fp16.\n sm = mx.sym.load(tmpfile + '-symbol.json')\n inputs = mx.sym.var('data', dtype='float16')\n net_fp16 = mx.gluon.SymbolBlock(sm, inputs)\n net_fp16.collect_params().load(tmpfile + '-0000.params', ctx=ctx)\n # 3. Get a conv layer's weight parameter name. Conv layer's weight param is\n # expected to be of dtype casted, fp16.\n for param_name in net_fp16.params.keys():\n if 'conv' in param_name and 'weight' in param_name:\n break\n assert np.dtype(net_fp16.params[param_name].dtype) == np.dtype(np.float16)\n\n\n@with_seed()\ndef test_large_models():\n ctx = default_context()\n # Create model\n net = gluon.nn.HybridSequential()\n\n largest_num_features = 256\n with net.name_scope():\n net.add(nn.Conv2D(largest_num_features, 3))\n\n net.hybridize()\n net.initialize(mx.init.Normal(sigma=0.01), ctx=ctx)\n\n # Compute the height (=width) of the square tensor of the given size in bytes\n def tensor_size(big_tensor_bytes):\n bytes_per_float = 4\n sz = int(math.sqrt(big_tensor_bytes / largest_num_features / bytes_per_float))\n return (sz // 100) * 100\n\n # The idea is to create models with large tensors of (say) 20% of the total memory.\n # This in the past has given cudnnFind() trouble when it needed to allocate similar I/O's\n # from the area carved out by the MXNET_GPU_MEM_POOL_RESERVE setting (by default 5%).\n (free_mem_bytes, total_mem_bytes) = mx.context.gpu_memory_info(ctx.device_id)\n start_size = tensor_size(0.20 * total_mem_bytes)\n num_trials = 10\n sys.stderr.write(' testing global memory of size {} ... 
'.format(total_mem_bytes))\n sys.stderr.flush()\n for i in range(num_trials):\n sz = start_size - 10 * i\n (height, width) = (sz,sz)\n sys.stderr.write(\" {}x{} \".format(height,width))\n sys.stderr.flush()\n data_in = nd.random_uniform(low=0, high=255, shape=(1, 3, height, width),\n ctx=ctx, dtype=\"float32\")\n # Evaluate model\n net(data_in).asnumpy()\n\n\nif __name__ == '__main__':\n import nose\n nose.runmodule()\n", "repo_name": "researchmm/tasn", "sub_path": "tasn-mxnet/tests/python/gpu/test_gluon_gpu.py", "file_name": "test_gluon_gpu.py", "file_ext": "py", "file_size_in_byte": 16168, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 216, "dataset": "github-code", "pt": "76", "api": [{"api_name": "os.path.dirname", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path", "line_number": 21, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 21, "usage_type": "call"}, {"api_name": "os.path.expanduser", "line_number": 21, "usage_type": "call"}, {"api_name": "sys.path.insert", "line_number": 22, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 22, "usage_type": "call"}, {"api_name": "os.path", "line_number": 22, "usage_type": "attribute"}, {"api_name": "mxnet.test_utils.set_default_context", "line_number": 28, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 28, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 31, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 31, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 32, "usage_type": "call"}, {"api_name": "mxnet.nd.ones", "line_number": 33, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 33, "usage_type": "attribute"}, {"api_name": "mxnet.cpu", "line_number": 37, "usage_type": "call"}, {"api_name": "mxnet.nd.ones", "line_number": 38, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 38, "usage_type": "attribute"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 43, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 45, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 49, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 49, "usage_type": "call"}, {"api_name": "mxnet.nd.uniform", "line_number": 50, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 50, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 51, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 52, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 56, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 57, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 61, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 63, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 47, "usage_type": "call"}, {"api_name": "mxnet.nd.uniform", "line_number": 73, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 73, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 73, "usage_type": "call"}, {"api_name": "mxnet.test_utils.rand_ndarray", "line_number": 79, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 86, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 87, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 93, "usage_type": "call"}, 
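# Editor's note: a worked example, with made-up numbers, of the arithmetic in
# the tensor_size() helper of test_large_models above: a float32 NCHW tensor
# with C channels occupies C * H * W * 4 bytes, so a byte budget maps to a
# square side of sqrt(bytes / (C * 4)), rounded down to a multiple of 100.
import math

largest_num_features = 256            # C, as in the test above
budget_bytes = 2 * 1024 ** 3          # hypothetical 2 GiB budget
sz = int(math.sqrt(budget_bytes / largest_num_features / 4))
sz = (sz // 100) * 100                # 1448 -> 1400
print(sz)                             # a 1400x1400 map then uses ~1.87 GiB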
{"api_name": "mxnet.autograd", "line_number": 93, "usage_type": "name"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 97, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 104, "usage_type": "call"}, {"api_name": "mxnet.nd.ones", "line_number": 106, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 106, "usage_type": "attribute"}, {"api_name": "mxnet.nd.ones", "line_number": 107, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 107, "usage_type": "attribute"}, {"api_name": "mxnet.nd.ones", "line_number": 109, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 109, "usage_type": "attribute"}, {"api_name": "mxnet.nd.ones", "line_number": 112, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 112, "usage_type": "attribute"}, {"api_name": "mxnet.nd.ones", "line_number": 113, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 113, "usage_type": "attribute"}, {"api_name": "common.with_seed", "line_number": 66, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 67, "usage_type": "call"}, {"api_name": "mxnet.nd.uniform", "line_number": 123, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 123, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 123, "usage_type": "call"}, {"api_name": "mxnet.nd.uniform", "line_number": 124, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 124, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 124, "usage_type": "call"}, {"api_name": "mxnet.nd.uniform", "line_number": 125, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 125, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 125, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 132, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 133, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 133, "usage_type": "name"}, {"api_name": "numpy.isnan", "line_number": 137, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 116, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 117, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 140, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 141, "usage_type": "call"}, {"api_name": "mxnet.random.uniform", "line_number": 168, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 168, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 170, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 170, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 171, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 171, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 173, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 173, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 174, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 174, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 175, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 175, "usage_type": "attribute"}, {"api_name": "mxnet.random.uniform", "line_number": 187, "usage_type": "call"}, {"api_name": "mxnet.random", "line_number": 187, "usage_type": 
"attribute"}, {"api_name": "numpy.testing.assert_allclose", "line_number": 188, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 190, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 191, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 195, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 196, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 201, "usage_type": "call"}, {"api_name": "common.assert_raises_cudnn_not_satisfied", "line_number": 202, "usage_type": "call"}, {"api_name": "mxnet.gluon.loss.CTCLoss", "line_number": 212, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 212, "usage_type": "attribute"}, {"api_name": "mxnet.nd.arange", "line_number": 213, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 213, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 213, "usage_type": "call"}, {"api_name": "mxnet.nd.array", "line_number": 214, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 214, "usage_type": "attribute"}, {"api_name": "mxnet.cpu", "line_number": 214, "usage_type": "call"}, {"api_name": "mxnet.nd.array", "line_number": 215, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 215, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 215, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 217, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 219, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 219, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 223, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 225, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 225, "usage_type": "attribute"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 229, "usage_type": "call"}, {"api_name": "mxnet.nd.ones", "line_number": 235, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 235, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 235, "usage_type": "call"}, {"api_name": "mxnet.nd.ones", "line_number": 236, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 236, "usage_type": "attribute"}, {"api_name": "mxnet.cpu", "line_number": 236, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 242, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 242, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 243, "usage_type": "call"}, {"api_name": "numpy.ones", "line_number": 243, "usage_type": "call"}, {"api_name": "common.with_seed", "line_number": 232, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 249, "usage_type": "attribute"}, {"api_name": "mxnet.gluon", "line_number": 251, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 267, "usage_type": "call"}, {"api_name": "mxnet.gpu", "line_number": 268, "usage_type": "call"}, {"api_name": "mxnet.cpu", "line_number": 270, "usage_type": "call"}, {"api_name": "mxnet.gluon.nn.BatchNorm", "line_number": 273, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 273, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.contrib.nn.SyncBatchNorm", "line_number": 274, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 274, "usage_type": "attribute"}, {"api_name": 
"mxnet.gluon.utils.split_and_load", "line_number": 283, "usage_type": "call"}, {"api_name": "mxnet.autograd.record", "line_number": 287, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 287, "usage_type": "attribute"}, {"api_name": "mxnet.autograd.backward", "line_number": 292, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 292, "usage_type": "attribute"}, {"api_name": "mxnet.autograd.backward", "line_number": 293, "usage_type": "call"}, {"api_name": "mxnet.autograd", "line_number": 293, "usage_type": "attribute"}, {"api_name": "mxnet.nd.concat", "line_number": 295, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 295, "usage_type": "attribute"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 297, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 298, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 299, "usage_type": "call"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 302, "usage_type": "call"}, {"api_name": "mxnet.nd.concat", "line_number": 305, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 305, "usage_type": "attribute"}, {"api_name": "mxnet.test_utils.assert_almost_equal", "line_number": 306, "usage_type": "call"}, {"api_name": "mxnet.nd.zeros", "line_number": 313, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 313, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 313, "usage_type": "call"}, {"api_name": "mxnet.nd.random.uniform", "line_number": 322, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 322, "usage_type": "attribute"}, {"api_name": "common.with_seed", "line_number": 308, "usage_type": "call"}, {"api_name": "tempfile.mkdtemp", "line_number": 332, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 333, "usage_type": "call"}, {"api_name": "os.path", "line_number": 333, "usage_type": "attribute"}, {"api_name": "mxnet.gpu", "line_number": 334, "usage_type": "call"}, {"api_name": "mxnet.gluon.model_zoo.vision.resnet34_v2", "line_number": 336, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 336, "usage_type": "attribute"}, {"api_name": "mxnet.nd.zeros", "line_number": 339, "usage_type": "call"}, {"api_name": "mxnet.nd", "line_number": 339, "usage_type": "attribute"}, {"api_name": "mxnet.sym.load", "line_number": 345, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 345, "usage_type": "attribute"}, {"api_name": "mxnet.sym.var", "line_number": 346, "usage_type": "call"}, {"api_name": "mxnet.sym", "line_number": 346, "usage_type": "attribute"}, {"api_name": "mxnet.gluon.SymbolBlock", "line_number": 347, "usage_type": "call"}, {"api_name": "mxnet.gluon", "line_number": 347, "usage_type": "attribute"}, {"api_name": "numpy.dtype", "line_number": 354, "usage_type": "call"}, {"api_name": "numpy.float16", "line_number": 354, "usage_type": "attribute"}, {"api_name": "common.with_seed", "line_number": 326, "usage_type": "call"}, {"api_name": "mxnet.init.Normal", "line_number": 368, "usage_type": "call"}, {"api_name": "mxnet.init", "line_number": 368, "usage_type": "attribute"}, {"api_name": "math.sqrt", "line_number": 373, "usage_type": "call"}, {"api_name": "mxnet.context.gpu_memory_info", "line_number": 379, "usage_type": "call"}, {"api_name": "mxnet.context", "line_number": 379, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 382, "usage_type": "call"}, {"api_name": 
"sys.stderr", "line_number": 382, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 383, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 383, "usage_type": "attribute"}, {"api_name": "sys.stderr.write", "line_number": 387, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 387, "usage_type": "attribute"}, {"api_name": "sys.stderr.flush", "line_number": 388, "usage_type": "call"}, {"api_name": "sys.stderr", "line_number": 388, "usage_type": "attribute"}, {"api_name": "common.with_seed", "line_number": 357, "usage_type": "call"}, {"api_name": "nose.runmodule", "line_number": 397, "usage_type": "call"}]} +{"seq_id": "72341040889", "text": "import os\nimport sys\n\nimport flask\nfrom flask import request, render_template, redirect, url_for\n\napp = flask.Flask(__name__)\n\nfrom User import persist_user\n# import User\n\n\n\n@app.route('/')\ndef home():\n return redirect(url_for('register'))\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n message = None\n try:\n if request.method == 'POST':\n data = request.form.to_dict()\n op = persist_user(data)\n # print('request params: ', data)\n print('op', op)\n message = 'Registered!'\n\n except Exception as e:\n render_template('register.html', error=e.message) # GET or invalid cred\n\n return render_template('register.html', message=message)\n\n\n\n# if __name__ == '__main__':\n# sys.path.append(os.path.dirname(__name__))\n# User.createDB()\n# app.run(debug=True)\n", "repo_name": "gurnoors/TravelBuddy", "sub_path": "com/fireeye/app.py", "file_name": "app.py", "file_ext": "py", "file_size_in_byte": 863, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "flask.Flask", "line_number": 7, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 16, "usage_type": "call"}, {"api_name": "flask.request.method", "line_number": 23, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 23, "usage_type": "name"}, {"api_name": "flask.request.form.to_dict", "line_number": 24, "usage_type": "call"}, {"api_name": "flask.request.form", "line_number": 24, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 24, "usage_type": "name"}, {"api_name": "User.persist_user", "line_number": 25, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 31, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 33, "usage_type": "call"}]} +{"seq_id": "7862425422", "text": "import torch\nfrom exp_reaching_goals.reaching_goals_utils import flatten_layers\nfrom exp_reaching_goals.network import actor, critic, signaling_net\n\nimport numpy as np\n\n\ndef calculate_critic_loss(critic, target_critic, loss_criterion, r, input, a, input_next, a_next, gamma):\n output_table = critic(input)\n target_output_table = target_critic(input_next)\n output = output_table[range(len(a)), a]\n target_output = target_output_table[range(len(a)), a_next]\n\n td_target = r + gamma * target_output\n critic_loss = loss_criterion(td_target, output)\n\n return critic_loss\n\n\ndef grad_and_step(net, net_optimizer, obj, type):\n assert type in ['descent', 'ascent']\n\n net_optimizer.zero_grad()\n net_grad = torch.autograd.grad(obj, list(net.parameters()), retain_graph=True)\n net_params = list(net.parameters())\n for layer in range(len(net_params)):\n if type == 'descent':\n 
net_params[layer].grad = net_grad[layer]\n else:\n net_params[layer].grad = - net_grad[layer]\n net_params[layer].grad.data.clamp_(-1, 1)\n net_optimizer.step()\n return\n\n\nclass sender_class(object):\n def __init__(self, config, device, id=0):\n self.name = name = 'sender'\n self.config = config\n self.device = device\n self.id = id\n self.dim_action = dim_action = config.env.dim_action\n self.epsilon = config.sender.epsilon_greedy\n\n # Gi(s,aj)\n self.critic_Gi = critic(config.n_channels.obs_sender, dim_action, config, belongto=name, name='critic_Gi',\n device=device)\n # Gi(s,aj)\n self.critic_Gj = critic(config.n_channels.obs_sender, dim_action, config, belongto=name, name='critic_Gj',\n device=device)\n # phi(sigma|s)\n self.signaling_net = signaling_net(config, device=device)\n\n self.critic_loss_criterion = torch.nn.MSELoss(reduction='mean')\n self.critic_Gi_optimizer = torch.optim.Adam(self.critic_Gi.parameters(), config.sender.lr_critic_Gi)\n self.critic_Gj_optimizer = torch.optim.Adam(self.critic_Gj.parameters(), config.sender.lr_critic_Gj)\n self.signaling_optimizer = torch.optim.Adam(self.signaling_net.parameters(), config.sender.lr_signal)\n\n # target critics\n self.target_critic_Gi = critic(config.n_channels.obs_sender, dim_action, config, belongto=name,\n name='target_critic_Gi', device=device)\n self.target_critic_Gi.load_state_dict(self.critic_Gi.state_dict())\n self.target_critic_Gj = critic(config.n_channels.obs_sender, dim_action, config, belongto=name,\n name='target_critic_Gj', device=device)\n self.target_critic_Gj.load_state_dict(self.critic_Gj.state_dict())\n\n self.temperature = 1\n\n def build_connection(self, receiver):\n self.receiver = receiver\n\n def calculate_v(self, critic, input_critic, phi, obs_receiver):\n # v(s) = \\sum_sigma phi(sigma|s) * \\sum_a pi(a|sigma) * Gi(s,a)\n batch_size = phi.shape[0]\n message_dim = phi.shape[1]\n all_message = torch.nn.functional.one_hot(torch.arange(message_dim)) \\\n .view(message_dim,\n self.config.env.map_height,\n self.config.env.map_width) \\\n .unsqueeze(dim=0).repeat(batch_size, 1, 1, 1).unsqueeze(dim=2).to(self.device)\n\n obs_receiver = obs_receiver.repeat(1, message_dim, 1, 1).unsqueeze(dim=2)\n obs_and_message_receiver = torch.cat([obs_receiver, all_message], dim=2)\n\n obs_and_message_receiver_flatten = obs_and_message_receiver.view(batch_size * message_dim, 2,\n obs_and_message_receiver.shape[-2],\n obs_and_message_receiver.shape[-1])\n\n _, pi_flatten = self.receiver.choose_action(obs_and_message_receiver_flatten)\n pi = pi_flatten.view(obs_and_message_receiver.shape[0], obs_and_message_receiver.shape[1], pi_flatten.shape[-1])\n pi_sum_all_message = torch.sum(pi * phi.unsqueeze(dim=2).repeat(1, 1, pi.shape[-1]), dim=1)\n\n g_table = critic(input_critic)\n v = torch.sum(g_table * pi_sum_all_message, dim=1)\n return v\n\n def calculate_2critics_loss(self, batch):\n ri = batch.data[batch.name_dict['ri']]\n rj = batch.data[batch.name_dict['rj']]\n obs_sender = batch.data[batch.name_dict['obs_sender']]\n obs_sender_next = batch.data[batch.name_dict['obs_sender_next']]\n aj = batch.data[batch.name_dict['a']]\n aj_next = batch.data[batch.name_dict['a_next']]\n\n critic_loss_Gi = calculate_critic_loss(self.critic_Gi, self.target_critic_Gi, self.critic_loss_criterion,\n ri, input=obs_sender, a=aj, input_next=obs_sender_next, a_next=aj_next,\n gamma=self.config.sender.gamma)\n critic_loss_Gj = calculate_critic_loss(self.critic_Gj, self.target_critic_Gj, self.critic_loss_criterion,\n rj, input=obs_sender, a=aj, 
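# Editor's note: a tiny standalone sketch (dummy tensors, not the classes
# above) of the expectation that calculate_v() just above evaluates:
#     v(s) = sum_sigma phi(sigma|s) * sum_a pi(a|sigma) * Gi(s, a)
import torch

batch, n_messages, n_actions = 2, 4, 3
phi = torch.softmax(torch.randn(batch, n_messages), dim=1)             # phi(sigma|s)
pi = torch.softmax(torch.randn(batch, n_messages, n_actions), dim=2)   # pi(a|sigma)
g = torch.randn(batch, n_actions)                                      # Gi(s, a)

pi_marginal = (phi.unsqueeze(2) * pi).sum(dim=1)   # marginalise out the message
v = (pi_marginal * g).sum(dim=1)                   # one value per batch element
print(v.shape)                                     # torch.Size([2])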
input_next=obs_sender_next, a_next=aj_next,\n gamma=self.config.sender.gamma)\n return critic_loss_Gi, critic_loss_Gj\n\n def softupdate_2target_critics(self):\n tau = self.config.nn.target_critic_tau\n for tar, cur in zip(self.target_critic_Gi.parameters(), self.critic_Gi.parameters()):\n tar.data.copy_(cur.data * (1.0 - tau) + tar.data * tau)\n for tar, cur in zip(self.target_critic_Gj.parameters(), self.critic_Gj.parameters()):\n tar.data.copy_(cur.data * (1.0 - tau) + tar.data * tau)\n return\n\n def calculate_for_updating(self, batch):\n critic_loss_Gi, critic_loss_Gj = self.calculate_2critics_loss(batch)\n gradeta = self.calculate_gradeta(batch)\n\n return critic_loss_Gi, critic_loss_Gj, gradeta\n\n def update(self, critic_loss_Gi, critic_loss_Gj, gradeta):\n\n grad_and_step(self.critic_Gi, self.critic_Gi_optimizer, critic_loss_Gi, 'descent')\n grad_and_step(self.critic_Gj, self.critic_Gj_optimizer, critic_loss_Gj, 'descent')\n\n self.softupdate_2target_critics()\n\n self.signaling_optimizer.zero_grad()\n params = list(self.signaling_net.parameters())\n for i in range(len(list(self.signaling_net.parameters()))):\n params[i].grad = - gradeta[i] # gradient ascent\n params[i].grad.data.clamp_(-1, 1)\n self.signaling_optimizer.step()\n\n def send_message(self, obs):\n batch_size = len(obs)\n logits, phi = self.signaling_net(obs)\n phi = (1 - self.epsilon) * phi + self.epsilon / phi.shape[0]\n sample = torch.nn.functional.gumbel_softmax(logits, tau=self.temperature, hard=True)\n message = sample.view(batch_size, 1, self.config.env.map_height, self.config.env.map_width)\n return message, phi\n\n def calculate_gradeta(self, batch):\n obs_sender = batch.data[batch.name_dict['obs_sender']]\n aj = batch.data[batch.name_dict['a']]\n pij = batch.data[batch.name_dict['pi']]\n pij_aj = pij[range(len(aj)), aj]\n\n phi = batch.data[batch.name_dict['phi']]\n # phi_np = np.array(phi.detach())\n sigma = message = batch.data[batch.name_dict['message']]\n sigma_flatten = sigma.view(sigma.shape[0], -1)\n idx_flatten = torch.nonzero(sigma_flatten)[:, 1]\n phi_sigma = phi[range(idx_flatten.shape[0]), idx_flatten]\n\n ''' SG (Signaling Gradient) '''\n # s, aj\n Gi_table = self.critic_Gi(obs_sender)\n Gi = Gi_table[range(len(aj)), aj]\n obs_receiver = obs_sender[:, 0:1:, :,\n :] # This is for advantage. 
The other way is to make the receiver to tell the results, in which the sender doesn't need to have access to this var.\n Vi = self.calculate_v(self.critic_Gi, obs_sender, phi, obs_receiver)\n advantage_i = Gi - Vi\n\n log_phi_sigma = torch.log(phi_sigma)\n log_pij_aj = torch.log(pij_aj)\n\n # tuning for gumbel-softmax\n term = torch.mean(advantage_i.detach() * (log_phi_sigma\n + log_pij_aj * self.config.sender.coe_for_recovery_fromgumbel\n ))\n # term1 = torch.mean(Gi.detach() * log_phi_sigma)\n # term2 = torch.mean(Gi.detach() * log_pij_aj)\n # gradeta1 = torch.autograd.grad(term1, list(self.signaling_net.parameters()), retain_graph=True)\n # gradeta2 = torch.autograd.grad(term2, list(self.signaling_net.parameters()), retain_graph=True)\n\n gradeta = torch.autograd.grad(term, list(self.signaling_net.parameters()), retain_graph=True)\n gradeta_flatten = flatten_layers(gradeta)\n\n ''' BCE Obedience Constraint (Lagrangian) '''\n if not self.config.env.aligned_object:\n batch_len = aj.shape[0]\n sigma_counterfactual_index_flatten = torch.randint(self.config.env.map_height * self.config.env.map_width,\n size=(batch_len,)).to(self.device) # negative sampling\n sigma_counterfactual_index = [\n torch.floor(sigma_counterfactual_index_flatten / self.config.env.map_height).long().unsqueeze(dim=0),\n (sigma_counterfactual_index_flatten % self.config.env.map_width).unsqueeze(dim=0)]\n sigma_counterfactual_index = torch.cat(sigma_counterfactual_index).to(self.device)\n sigma_counterfactual = torch.zeros(batch_len, self.config.env.map_height, self.config.env.map_width,\n dtype=torch.double).to(self.device)\n sigma_counterfactual[range(batch_len), sigma_counterfactual_index[0], sigma_counterfactual_index[1]] = 1\n # sigma_counterfactual_np = np.array(sigma_counterfactual)\n sigma_counterfactual = sigma_counterfactual.unsqueeze(dim=1)\n obs_and_message_receiver = batch.data[batch.name_dict['obs_and_message_receiver']]\n obs_and_message_counterfactual_receiver = torch.cat([obs_and_message_receiver[:, 0:1, :, :],\n sigma_counterfactual], dim=1)\n _, pij_counterfactual = self.receiver.choose_action(obs_and_message_counterfactual_receiver)\n\n # s, aj\n Gj_table = self.critic_Gj(obs_sender)\n # Vj = self.calculate_v(self.critic_Gj, obs_sender, phi, obs_receiver)\n # advantage_j_table = Gj_table - Vj.unsqueeze(dim=1).repeat(1, self.dim_action)\n term1 = phi_sigma * torch.sum(pij.detach() * Gj_table.detach(), dim=1).unsqueeze(dim=1)\n term2 = phi_sigma.detach() * torch.sum(pij * Gj_table.detach(), dim=1).unsqueeze(dim=1)\n # term1 = phi_sigma * torch.sum(pij.detach() * advantage_j_table.detach(), dim=1).unsqueeze(dim=1)\n # term2 = phi_sigma.detach() * torch.sum(pij * advantage_j_table.detach(), dim=1).unsqueeze(dim=1)\n\n # every pixel\n term1_sum = torch.sum(term1, dim=1)\n term2_sum = torch.sum(term2, dim=1)\n\n term1_mean = torch.mean(term1_sum)\n term2_mean = torch.mean(term2_sum)\n\n gradeta_constraint_term1 = torch.autograd.grad(term1_mean, list(self.signaling_net.parameters()),\n retain_graph=True)\n gradeta_constraint_term2 = torch.autograd.grad(term2_mean, list(self.signaling_net.parameters()),\n retain_graph=True)\n\n gradeta_constraint_term_1st_flatten = flatten_layers(gradeta_constraint_term1)\n gradeta_constraint_term_2nd_flatten = flatten_layers(\n gradeta_constraint_term2) * self.config.sender.coe_for_recovery_fromgumbel\n\n gradeta_constraint_flatten = gradeta_constraint_term_1st_flatten \\\n + gradeta_constraint_term_2nd_flatten\n if self.config.sender.sender_objective_alpha >= 1:\n 
gradeta_flatten = gradeta_flatten / self.config.sender.sender_objective_alpha + gradeta_constraint_flatten\n elif 0 <= self.config.sender.sender_objective_alpha < 1:\n gradeta_flatten = gradeta_flatten + self.config.sender.sender_objective_alpha * gradeta_constraint_flatten\n else:\n # raise IOError\n pass\n\n # reform to be in original shape\n gradeta_flatten = gradeta_flatten.squeeze()\n gradeta = []\n idx = 0\n for layerl in self.signaling_net.parameters():\n len_layerl = 1\n for i in layerl.shape:\n len_layerl *= i\n gradeta_layerl_section = gradeta_flatten[idx:idx + len_layerl]\n gradeta_layerl = gradeta_layerl_section.view(layerl.shape)\n gradeta.append(gradeta_layerl)\n idx += len_layerl\n\n return gradeta\n\n def save_models(self):\n self.critic_Gi.save_checkpoint()\n self.critic_Gj.save_checkpoint()\n self.target_critic_Gi.save_checkpoint()\n self.target_critic_Gj.save_checkpoint()\n self.signaling_net.save_checkpoint()\n\n def load_models(self):\n self.critic_Gi.load_checkpoint()\n self.critic_Gj.load_checkpoint()\n self.target_critic_Gi.load_checkpoint()\n self.target_critic_Gj.load_checkpoint()\n self.signaling_net.load_checkpoint()\n", "repo_name": "YueLin301/InformationDesignMARL", "sub_path": "exp_reaching_goals/agent_class.py", "file_name": "agent_class.py", "file_ext": "py", "file_size_in_byte": 13649, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "76", "api": [{"api_name": "exp_reaching_goals.network.critic", "line_number": 9, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 24, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 24, "usage_type": "attribute"}, {"api_name": "exp_reaching_goals.network.critic", "line_number": 46, "usage_type": "call"}, {"api_name": "exp_reaching_goals.network.critic", "line_number": 49, "usage_type": "call"}, {"api_name": "exp_reaching_goals.network.signaling_net", "line_number": 52, "usage_type": "call"}, {"api_name": "torch.nn.MSELoss", "line_number": 54, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 54, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 55, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 55, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 56, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 56, "usage_type": "attribute"}, {"api_name": "torch.optim.Adam", "line_number": 57, "usage_type": "call"}, {"api_name": "torch.optim", "line_number": 57, "usage_type": "attribute"}, {"api_name": "exp_reaching_goals.network.critic", "line_number": 60, "usage_type": "call"}, {"api_name": "exp_reaching_goals.network.critic", "line_number": 63, "usage_type": "call"}, {"api_name": "torch.nn.functional.one_hot", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 76, "usage_type": "attribute"}, {"api_name": "torch.arange", "line_number": 76, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 83, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 91, "usage_type": "call"}, {"api_name": "exp_reaching_goals.network.critic", "line_number": 93, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 94, "usage_type": "call"}, {"api_name": "torch.nn.functional.gumbel_softmax", "line_number": 145, "usage_type": "call"}, {"api_name": "torch.nn", "line_number": 145, "usage_type": "attribute"}, {"api_name": "torch.nonzero", "line_number": 159, "usage_type": "call"}, {"api_name": 
"torch.log", "line_number": 171, "usage_type": "call"}, {"api_name": "torch.log", "line_number": 172, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 175, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 183, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 183, "usage_type": "attribute"}, {"api_name": "exp_reaching_goals.reaching_goals_utils.flatten_layers", "line_number": 184, "usage_type": "call"}, {"api_name": "torch.randint", "line_number": 189, "usage_type": "call"}, {"api_name": "torch.floor", "line_number": 192, "usage_type": "call"}, {"api_name": "torch.cat", "line_number": 194, "usage_type": "call"}, {"api_name": "torch.zeros", "line_number": 195, "usage_type": "call"}, {"api_name": "torch.double", "line_number": 196, "usage_type": "attribute"}, {"api_name": "torch.cat", "line_number": 201, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 209, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 210, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 215, "usage_type": "call"}, {"api_name": "torch.sum", "line_number": 216, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 218, "usage_type": "call"}, {"api_name": "torch.mean", "line_number": 219, "usage_type": "call"}, {"api_name": "torch.autograd.grad", "line_number": 221, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 221, "usage_type": "attribute"}, {"api_name": "torch.autograd.grad", "line_number": 223, "usage_type": "call"}, {"api_name": "torch.autograd", "line_number": 223, "usage_type": "attribute"}, {"api_name": "exp_reaching_goals.reaching_goals_utils.flatten_layers", "line_number": 226, "usage_type": "call"}, {"api_name": "exp_reaching_goals.reaching_goals_utils.flatten_layers", "line_number": 227, "usage_type": "call"}]} +{"seq_id": "39986526485", "text": "from datetime import datetime, date, timedelta, timezone\nfrom flask import jsonify, abort\nimport jwt\nimport mysql.connector\nimport os\nimport uuid\n\ndef createUUID():\n return str(uuid.uuid4())\n\ndef getProfileFromDb(conn, uid): \n with conn.cursor() as c:\n # get latest (by row creation timetsamp) profile from db\n q = 'SELECT weight, height, gender, age, goal, userid FROM profiles WHERE userid=%s ORDER BY datecreated DESC LIMIT 1'\n args = (uid,)\n c.execute(q, args)\n retVal = c.fetchall()\n if len(retVal) != 1:\n return abort(400)\n retVal = {\n 'weight': retVal[0][0],\n 'height': retVal[0][1],\n 'gender': retVal[0][2],\n 'age': retVal[0][3],\n 'goal': retVal[0][4],\n 'uid': retVal[0][5]\n }\n\n return retVal\n\ndef getProfile(conn, uid):\n # simply return result from obtaining latest profile from DB utility function\n return jsonify(getProfileFromDb(conn, uid))\n \ndef postProfile(conn, uid, data):\n # extract and validate data from body \n if 'weight' not in data:\n return abort(400)\n if 'height' not in data:\n return abort(400)\n if 'gender' not in data:\n return abort(400)\n if 'age' not in data:\n return abort(400)\n if 'goal' not in data:\n return abort(400)\n # weight, height, gender, age\n weight = data['weight']\n height = data['height']\n gender = data['gender']\n age = data['age']\n goal = data['goal']\n if weight is None or height is None or gender is None or age is None or goal is None:\n return abort(400)\n try:\n weight = int(weight) \n height = int(height)\n age = int(age)\n except:\n return abort(400)\n\n if weight < 40:\n return abort(400)\n if height < 120:\n return abort(400)\n if age < 16 or age > 
110:\n return abort(400)\n if len(goal) < 1:\n return abort(400)\n \n with conn.cursor() as c:\n # insert new profile row into db\n q = 'INSERT INTO profiles (id, userid, weight, height, gender, age, goal) VALUES(%s, %s, %s, %s, %s, %s, %s)'\n args = (str(uuid.uuid4()), uid, weight, height, gender, age, goal)\n c.execute(q, args)\n try:\n conn.commit()\n except:\n abort(500)\n\n # return create/ update success message\n return jsonify({\n 'message': 'profile created/ updated successfully'\n }) \n\ndef parseMeal(meal):\n # extract relevant fields from meal and parse/ validate them\n if 'name' not in meal:\n return False\n if 'uri' not in meal:\n return False\n if 'image' not in meal:\n return False\n if 'calories' not in meal:\n return False\n name = meal['name']\n uri = meal['uri']\n image = meal['image']\n calories = meal['calories']\n if name is None or uri is None or image is None or calories is None:\n return False\n try:\n calories = int(calories)\n except:\n return False\n\n # return a dict of extracted data\n return {\n 'name': name,\n 'uri': uri,\n 'image': image,\n 'calories': calories\n }\n\ndef getPlan(conn, uid, data):\n # extract, parse and validate meal plan id from data\n if 'planId' not in data:\n return abort(400)\n planId = data['planId']\n with conn.cursor() as c:\n # query DB using the id \n q = 'SELECT breakfast_name, breakfast_uri, breakfast_image, breakfast_calories, lunch_name, lunch_uri, lunch_image, lunch_calories, dinner_name, dinner_uri, dinner_image, dinner_calories, DATE_FORMAT(dateplanned, \"%Y/%m/%d\") FROM plans WHERE userid=%s AND id=%s'\n args = (uid, planId)\n print(args)\n c.execute(q, args)\n retVal = c.fetchall()\n\n # return error status if no matching plan found\n if len(retVal) == 0:\n return abort(400)\n\n # format as JSON to return to requestor\n return jsonify({\n 'breakfast': {\n 'name': retVal[0][0],\n 'uri': retVal[0][1],\n 'image': retVal[0][2],\n 'calories': retVal[0][3]\n },\n 'lunch': {\n 'name': retVal[0][4],\n 'uri': retVal[0][5],\n 'image': retVal[0][6],\n 'calories': retVal[0][7]\n },\n 'dinner': {\n 'name': retVal[0][8],\n 'uri': retVal[0][9],\n 'image': retVal[0][10],\n 'calories': retVal[0][11]\n },\n 'plannedDate': retVal[0][12]\n })\n\ndef postPlan(conn, uid, data): \n # extract, parse and validate relevant arguments from data \n if 'breakfast' not in data:\n return abort(400) \n if 'lunch' not in data:\n return abort(400)\n if 'dinner' not in data:\n return abort(400)\n if 'plannedDate' not in data:\n return abort(400)\n\n breakfast = parseMeal(data['breakfast'])\n lunch = parseMeal(data['lunch'])\n dinner = parseMeal(data['dinner'])\n plannedDate = data['plannedDate']\n if not breakfast or not lunch or not dinner or not plannedDate:\n return abort(400)\n try:\n # check if the planned date is the current date or in the future\n dt = datetime.strptime(plannedDate, '%Y/%m/%d').date()\n today = date.today()\n # if the planned date is in the past, return error status\n if dt < today:\n return abort(400)\n plannedDate = dt.strftime('%Y-%m-%d')\n except:\n return abort(400)\n with conn.cursor() as c:\n # insert new meal plan into DB\n q = 'INSERT INTO plans(id, breakfast_name, breakfast_uri, breakfast_image, breakfast_calories, lunch_name, lunch_uri, lunch_image, lunch_calories, dinner_name, dinner_uri, dinner_image, dinner_calories, datePlanned, userid) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)'\n args = (createUUID(), breakfast['name'], breakfast['uri'], breakfast['image'], breakfast['calories'], 
lunch['name'], lunch['uri'], lunch['image'], lunch['calories'], dinner['name'], dinner['uri'], dinner['image'], dinner['calories'], plannedDate, uid)\n try:\n c.execute(q, args)\n conn.commit()\n except mysql.connector.Error as err:\n print('MySQL error:', err)\n return abort(500)\n except Exception as e:\n print('Misc. error:', e)\n return abort(500)\n \n # return success message when insertion is successful\n return jsonify({\n 'message': 'plan successfully created.'\n })\n\ndef createToken(sub):\n # use environment variables and current date time to create a valid JWT token\n # to be passed to the frontend\n delta = int(os.environ['JWT_DELTA'])\n secret = os.environ['JWT_SECRET']\n now = datetime.now(tz=timezone.utc)\n expiry = now + timedelta(seconds=delta)\n return jwt.encode({\n 'iat': now,\n 'exp': expiry,\n 'sub': sub\n }, secret)\n\ndef checkToken(token):\n # obtain the secret needed to decode a JWT token\n secret = os.environ['JWT_SECRET']\n try:\n # return the subject field of the token when decoding is successful\n return jwt.decode(token, secret, algorithms=['HS256'])['sub']\n except:\n # return None if the token is invalid or expired\n return None\n \ndef checkUser(conn, token):\n # extract the JWT token from the Authorization header\n token = token.replace('Bearer ', '')\n # check the validity of the token\n uid = checkToken(token)\n if uid is None:\n return False\n \n with conn.cursor() as c:\n # query the DB to ensure the extracted id is valid\n q = 'SELECT COUNT(*) FROM users WHERE id = %s'\n args = (uid,)\n c.execute(q, args)\n retVal = c.fetchall()\n # return uid if token is valid\n if retVal[0][0] == 1:\n return uid\n # return False if the uid is invalid\n return False", "repo_name": "HakureiAnna/CM2020GroupProject", "sub_path": "caldown/backend/app/util.py", "file_name": "util.py", "file_ext": "py", "file_size_in_byte": 7950, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "uuid.uuid4", "line_number": 9, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 19, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 33, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 38, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 40, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 42, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 44, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 54, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 60, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 63, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 65, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 67, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 69, "usage_type": "call"}, {"api_name": "uuid.uuid4", "line_number": 74, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 79, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 82, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 118, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 130, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 133, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 158, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 160, "usage_type": "call"}, 
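# Editor's note: a standalone sketch of the token round trip implemented by
# createToken()/checkToken() in util.py above, using PyJWT. The secret and
# lifetime below are placeholders, not the app's real JWT_SECRET/JWT_DELTA
# environment variables.
from datetime import datetime, timedelta, timezone
import jwt

secret, delta = "change-me", 3600
now = datetime.now(tz=timezone.utc)
token = jwt.encode(
    {"iat": now, "exp": now + timedelta(seconds=delta), "sub": "user-42"},
    secret,
)
print(jwt.decode(token, secret, algorithms=["HS256"])["sub"])  # -> user-42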
{"api_name": "flask.abort", "line_number": 162, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 164, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 171, "usage_type": "call"}, {"api_name": "datetime.datetime.strptime", "line_number": 174, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 174, "usage_type": "name"}, {"api_name": "datetime.date.today", "line_number": 175, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 175, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 178, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 181, "usage_type": "call"}, {"api_name": "mysql.connector.connector", "line_number": 189, "usage_type": "attribute"}, {"api_name": "mysql.connector", "line_number": 189, "usage_type": "name"}, {"api_name": "flask.abort", "line_number": 191, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 194, "usage_type": "call"}, {"api_name": "flask.jsonify", "line_number": 197, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 204, "usage_type": "attribute"}, {"api_name": "os.environ", "line_number": 205, "usage_type": "attribute"}, {"api_name": "datetime.datetime.now", "line_number": 206, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.timezone.utc", "line_number": 206, "usage_type": "attribute"}, {"api_name": "datetime.timezone", "line_number": 206, "usage_type": "name"}, {"api_name": "datetime.timedelta", "line_number": 207, "usage_type": "call"}, {"api_name": "jwt.encode", "line_number": 208, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 216, "usage_type": "attribute"}, {"api_name": "jwt.decode", "line_number": 219, "usage_type": "call"}]} +{"seq_id": "72090163448", "text": "from unstructured.partition.auto import partition\nfrom unstructured.staging.base import elements_to_json\nfrom time import perf_counter as pc\n\nt0 = pc()\n\n\nelements = partition(filename=\"./resume.pdf\")\nfor element in elements:\n print(elements)\n print(\"\\n\")\n\n\nelements_to_json(elements, filename=\"output.json\")\n\nprint(pc() - t0)\n", "repo_name": "alextanhongpin/python-unstructured", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 332, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "time.perf_counter", "line_number": 5, "usage_type": "call"}, {"api_name": "unstructured.partition.auto.partition", "line_number": 8, "usage_type": "call"}, {"api_name": "unstructured.staging.base.elements_to_json", "line_number": 14, "usage_type": "call"}, {"api_name": "time.perf_counter", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "18250710533", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\nimport jenkins.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Artifact',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('filename', models.CharField(max_length=255)),\n ('url', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Build',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('build_id', models.CharField(max_length=255)),\n ('number', models.IntegerField()),\n ('duration', models.IntegerField(null=True)),\n ('url', models.CharField(max_length=255)),\n ('phase', models.CharField(max_length=25)),\n ('status', models.CharField(max_length=255)),\n ('console_log', models.TextField(null=True, editable=False, blank=True)),\n ('parameters', jenkins.fields.JSONField(null=True, editable=False, blank=True)),\n ('created_at', models.DateTimeField(auto_now_add=True)),\n ],\n options={\n 'ordering': ['-number'],\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='JenkinsServer',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(unique=True, max_length=255)),\n ('url', models.CharField(unique=True, max_length=255)),\n ('username', models.CharField(max_length=255)),\n ('password', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Job',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='JobType',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=255)),\n ('description', models.TextField(null=True, blank=True)),\n ('config_xml', models.TextField()),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.AddField(\n model_name='job',\n name='jobtype',\n field=models.ForeignKey(to='jenkins.JobType'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='job',\n name='server',\n field=models.ForeignKey(to='jenkins.JenkinsServer'),\n preserve_default=True,\n ),\n migrations.AlterUniqueTogether(\n name='job',\n unique_together=set([('server', 'name')]),\n ),\n migrations.AddField(\n model_name='build',\n name='job',\n field=models.ForeignKey(to='jenkins.Job'),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='build',\n name='requested_by',\n field=models.ForeignKey(blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True),\n preserve_default=True,\n ),\n migrations.AddField(\n model_name='artifact',\n name='build',\n field=models.ForeignKey(to='jenkins.Build'),\n preserve_default=True,\n ),\n ]\n", "repo_name": "B-Rich/capomastro", "sub_path": "jenkins/migrations/0001_initial.py", "file_name": "0001_initial.py", "file_ext": "py", "file_size_in_byte": 4238, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.db.migrations.Migration", "line_number": 9, "usage_type": "attribute"}, {"api_name": "django.db.migrations", "line_number": 9, "usage_type": "name"}, {"api_name": "django.db.migrations.swappable_dependency", "line_number": 12, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 12, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 12, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 12, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 16, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 16, "usage_type": "name"}, {"api_name": 
"django.db.models.AutoField", "line_number": 19, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 19, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 20, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 20, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 21, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 21, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 25, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 25, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 27, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 27, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 30, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 30, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 31, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 31, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 32, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 32, "usage_type": "name"}, {"api_name": "django.db.models.IntegerField", "line_number": 33, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 33, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 34, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 34, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 35, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 35, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 36, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 36, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 37, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 37, "usage_type": "name"}, {"api_name": "jenkins.fields.fields.JSONField", "line_number": 38, "usage_type": "call"}, {"api_name": "jenkins.fields.fields", "line_number": 38, "usage_type": "attribute"}, {"api_name": "jenkins.fields", "line_number": 38, "usage_type": "name"}, {"api_name": "django.db.models.DateTimeField", "line_number": 39, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 39, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 44, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 44, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 46, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 46, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 49, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 49, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 50, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 50, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 51, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 51, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 52, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 52, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 53, "usage_type": "call"}, 
{"api_name": "django.db.models", "line_number": 53, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 57, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 57, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 59, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 59, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 62, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 62, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 63, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 63, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 67, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 67, "usage_type": "name"}, {"api_name": "django.db.migrations.CreateModel", "line_number": 69, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 69, "usage_type": "name"}, {"api_name": "django.db.models.AutoField", "line_number": 72, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 72, "usage_type": "name"}, {"api_name": "django.db.models.CharField", "line_number": 73, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 73, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 74, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 74, "usage_type": "name"}, {"api_name": "django.db.models.TextField", "line_number": 75, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 75, "usage_type": "name"}, {"api_name": "django.db.models.Model", "line_number": 79, "usage_type": "attribute"}, {"api_name": "django.db.models", "line_number": 79, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 81, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 81, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 84, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 84, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 87, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 87, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 90, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 90, "usage_type": "name"}, {"api_name": "django.db.migrations.AlterUniqueTogether", "line_number": 93, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 93, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 97, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 97, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 100, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 100, "usage_type": "name"}, {"api_name": "django.db.migrations.AddField", "line_number": 103, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 103, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 106, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 106, "usage_type": "name"}, {"api_name": "django.conf.settings.AUTH_USER_MODEL", "line_number": 106, "usage_type": "attribute"}, {"api_name": "django.conf.settings", "line_number": 106, "usage_type": "name"}, {"api_name": 
"django.db.migrations.AddField", "line_number": 109, "usage_type": "call"}, {"api_name": "django.db.migrations", "line_number": 109, "usage_type": "name"}, {"api_name": "django.db.models.ForeignKey", "line_number": 112, "usage_type": "call"}, {"api_name": "django.db.models", "line_number": 112, "usage_type": "name"}]} +{"seq_id": "32438391931", "text": "import glob\nimport csv\nimport wikipedia\n\nfilenames = glob.glob(\"/Users/ephraimkunz/Desktop/WikipediaDataMiner/incorrectly_classified/*.txt\")\nwrong = {}\n\nfor filename in filenames:\n with open(filename, 'r') as f:\n reader = csv.reader(f, delimiter=\",\")\n on_header = True\n for row in reader:\n if len(row) == 0:\n continue\n if on_header:\n on_header = False\n continue\n \n pageid = row[5]\n confidence = float(row[4])\n actual = row[1]\n predicted = row[2]\n\n if predicted != actual:\n false_pos = actual == \"2:no\"\n if pageid not in wrong.keys():\n wrong[pageid] = [(\"false_pos\" if false_pos else \"false_neg\", confidence)]\n else:\n wrong[pageid].append((\"false_pos\" if false_pos else \"false_neg\", confidence))\n\n\n# Lookup pageids for the mostly wrong\nitem_list = []\nfor k, v in wrong.iteritems():\n item_list.append((k, v[0][0], v[0][1]))\n\nitem_list.sort(key=lambda x: x[2], reverse=True)\n\ntop = [x for x in item_list if x[2] >= 0.9]\n\nprint()\nfor item in top:\n page = wikipedia.page(pageid=item[0])\n print(\"%s (%0.1f%%): %s\" % (page.title, item[2] * 100, item[1]))", "repo_name": "ephraimkunz/WikipediaDataMiner", "sub_path": "incorrectly_classified/incorrectly_classified.py", "file_name": "incorrectly_classified.py", "file_ext": "py", "file_size_in_byte": 1296, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "glob.glob", "line_number": 5, "usage_type": "call"}, {"api_name": "csv.reader", "line_number": 10, "usage_type": "call"}, {"api_name": "wikipedia.page", "line_number": 43, "usage_type": "call"}]} +{"seq_id": "24939960360", "text": "import fnmatch\nimport os\nfrom typing import Union, Tuple, List, Optional, MutableMapping, Any\nimport pygeoutils as geoutils\nimport async_retriever as ar\nimport py3dep\nfrom pydaymet import InvalidInputRange\nfrom pydaymet.core import Daymet, _check_requirements\nfrom pydaymet.pydaymet import _gridded_urls, _xarray_geomask\nfrom shapely.geometry import MultiPolygon, Polygon\nimport rasterio.features as rio_features\nimport io\nimport xarray as xr\nimport numpy as np\nimport pandas as pd\nfrom catchmentforcings.pet.pet4daymet import priestley_taylor, pm_fao56\nfrom catchmentforcings.utils.hydro_utils import t_range_days\n\nDEF_CRS = \"epsg:4326\"\n\n\ndef download_daymet_by_geom_bound(\n geometry: Union[Polygon, MultiPolygon, Tuple[float, float, float, float]],\n dates: Union[Tuple[str, str], Union[int, List[int]]],\n crs: str = DEF_CRS,\n variables: Optional[List[str]] = None,\n region: str = \"na\",\n time_scale: str = \"daily\",\n boundary: bool = True,\n) -> xr.Dataset:\n \"\"\"\n Get gridded data from the Daymet database at 1-km resolution in the boundary of the \"geometry\"\n\n if the error occurred: sqlite3.DatabaseError: database disk image is malformed,\n please delete the cache in the current directory of the performing script\n\n Parameters\n ----------\n geometry\n The geometry of the region of interest.\n dates\n Start and end dates as a tuple (start, end) or a list of years [2001, 2010, ...].\n crs\n The CRS of the input geometry, defaults to epsg:4326.\n variables\n List of variables 
to be downloaded. The acceptable variables are:\n ``tmin``, ``tmax``, ``prcp``, ``srad``, ``vp``, ``swe``, ``dayl``\n Descriptions can be found `here `__.\n region\n Region in the US, defaults to na. Acceptable values are:\n * na: Continental North America\n * hi: Hawaii\n * pr: Puerto Rico\n time_scale\n Data time scale which can be daily, monthly (monthly average),\n or annual (annual average). Defaults to daily.\n boundary\n if boundary is true, we will use the box of bounds as the geometry mask;\n otherwise, return downloaded data acccording to urls directly\n\n Returns\n -------\n xr.Dataset\n Daily climate data within a geometry's boundary\n\n Raises\n -------\n ValueError\n when downloading failed, raise a ValueError\n\n \"\"\"\n\n daymet = Daymet(variables, time_scale=time_scale, region=region)\n daymet.check_dates(dates)\n\n if isinstance(dates, tuple):\n dates_itr = daymet.dates_tolist(dates)\n else:\n dates_itr = daymet.years_tolist(dates)\n # transform the crs\n _geometry = geoutils.pygeoutils._geo2polygon(geometry, crs, DEF_CRS)\n\n if not _geometry.intersects(daymet.region_bbox[region]):\n raise InvalidInputRange(daymet.invalid_bbox_msg)\n\n urls, kwds = zip(\n *_gridded_urls(\n daymet.time_codes[time_scale],\n _geometry.bounds,\n daymet.region,\n daymet.variables,\n dates_itr,\n )\n )\n\n try:\n clm = xr.open_mfdataset(\n (\n io.BytesIO(r)\n for r in ar.retrieve(urls, \"binary\", request_kwds=kwds, max_workers=8)\n ),\n engine=\"scipy\",\n coords=\"minimal\",\n )\n except ValueError:\n msg = (\n \"The server did NOT process your request successfully. \"\n + \"Check your inputs and try again.\"\n )\n raise ValueError(msg)\n\n for k, v in daymet.units.items():\n if k in clm.variables:\n clm[k].attrs[\"units\"] = v\n\n clm = clm.drop_vars([\"lambert_conformal_conic\"])\n # daymet's crs comes from: https://daymet.ornl.gov/overview\n daymet_crs = \" \".join(\n [\n \"+proj=lcc\",\n \"+lat_1=25\",\n \"+lat_2=60\",\n \"+lat_0=42.5\",\n \"+lon_0=-100\",\n \"+x_0=0\",\n \"+y_0=0\",\n \"+ellps=WGS84\",\n \"+units=km\",\n \"+no_defs\",\n ]\n )\n clm.attrs[\"crs\"] = daymet_crs\n clm.attrs[\"nodatavals\"] = (0.0,)\n transform, _, _ = geoutils.pygeoutils._get_transform(clm, (\"y\", \"x\"))\n clm.attrs[\"transform\"] = transform\n clm.attrs[\"res\"] = (transform.a, transform.e)\n\n if isinstance(clm, xr.Dataset):\n for v in clm:\n clm[v].attrs[\"crs\"] = crs\n clm[v].attrs[\"nodatavals\"] = (0.0,)\n if boundary:\n return _xarray_geomask(clm, geometry.bounds, crs)\n else:\n return clm\n\n\ndef calculate_basin_grids_pet(\n clm_ds: xr.Dataset, pet_method: Union[str, list] = \"priestley_taylor\"\n) -> xr.Dataset:\n \"\"\"\n Compute Potential EvapoTranspiration using Daymet dataset.\n\n Parameters\n ----------\n clm_ds\n The dataset should include the following variables:\n `tmin``, ``tmax``, ``lat``, ``lon``, ``vp``, ``srad``, ``dayl``\n pet_method\n now support priestley_taylor and fao56\n\n Returns\n -------\n xr.Dataset\n The input dataset with an additional variable called ``pet``.\n\n \"\"\"\n\n if type(pet_method) is str:\n pet_method = [pet_method]\n assert np.sort(pet_method) in np.sort([\"priestley_taylor\", \"pm_fao56\"])\n\n keys = list(clm_ds.keys())\n reqs = [\"tmin\", \"tmax\", \"lat\", \"vp\", \"srad\", \"dayl\"]\n # units: °C, °C, °, Pa, W/m^2, seconds\n _check_requirements(reqs, keys)\n dtype = clm_ds.tmin.dtype\n dates = clm_ds[\"time\"]\n # km -> m\n res = clm_ds.res[0] * 1.0e3\n elev = py3dep.elevation_bygrid(clm_ds.x.values, clm_ds.y.values, clm_ds.crs, res)\n attrs = 
clm_ds.attrs\n clm_ds = xr.merge([clm_ds, elev], combine_attrs=\"override\")\n clm_ds.attrs = attrs\n clm_ds[\"elevation\"] = clm_ds.elevation.where(\n ~np.isnan(clm_ds.isel(time=0)[keys[0]]), drop=True\n ).T\n # Pa -> kPa\n clm_ds[\"vp\"] *= 1e-3\n # data -> day of year\n clm_ds[\"time\"] = pd.to_datetime(clm_ds.time.values).dayofyear.astype(dtype)\n\n t_min = clm_ds[\"tmin\"]\n t_max = clm_ds[\"tmax\"]\n # average over the daylight period of the day, W/m^2 -> average over the day, MJ m-2 day-1\n r_surf = clm_ds[\"srad\"] * clm_ds[\"dayl\"] * 1e-6\n lat = clm_ds.isel(time=0).lat\n # ° -> rad\n phi = lat * np.pi / 180.0\n elevation = clm_ds[\"elevation\"]\n doy = clm_ds[\"time\"]\n e_a = clm_ds[\"vp\"]\n\n for pet_name in pet_method:\n if pet_name == \"pm_fao56\":\n clm_ds[\"pet_fao56\"] = pm_fao56(\n t_min, t_max, r_surf, phi, elevation, doy, e_a=e_a\n )\n clm_ds[\"pet_fao56\"].attrs[\"units\"] = \"mm/day\"\n elif pet_name == \"priestley_taylor\":\n clm_ds[\"pet_pt\"] = priestley_taylor(\n t_min, t_max, r_surf, phi, elevation, doy, e_a=e_a\n )\n clm_ds[\"pet_pt\"].attrs[\"units\"] = \"mm/day\"\n\n # after calculation, recover the value of time and vp\n clm_ds[\"time\"] = dates\n clm_ds[\"vp\"] *= 1.0e3\n return clm_ds\n\n\ndef calculate_basin_mean(\n clm_ds: xr.Dataset,\n geometry: Union[Polygon, MultiPolygon, Tuple[float, float, float, float]],\n geo_crs: str = DEF_CRS,\n) -> xr.Dataset:\n \"\"\"\n Get gridded data from the Daymet database at 1-km resolution.\n\n Parameters\n ----------\n clm_ds\n gridded daymet Dataset of a basin.\n geometry\n The geometry of a basin.\n geo_crs\n The CRS of the input geometry, defaults to epsg:4326.\n Returns\n -------\n xr.Dataset\n Daily mean climate data of the basin\n\n \"\"\"\n\n clm = _xarray_geomask(clm_ds, geometry, geo_crs)\n ds = xr.Dataset({}, coords={\"time\": clm.time})\n for k in clm.data_vars:\n ds[k] = clm[k].mean(dim=(\"x\", \"y\"))\n return ds\n\n\ndef generate_boundary_dataset(\n clm_ds: xr.Dataset,\n geometry: Union[Polygon, MultiPolygon, Tuple[float, float, float, float]],\n geo_crs: str = DEF_CRS,\n) -> xr.Dataset:\n \"\"\"\n Generate an xarray dataset in the boundary of geometry, but the boundary belongs to clm_ds's array, not the geometry\n\n Parameters\n ----------\n clm_ds\n Downloaded gridded daymet Dataset of a basin.\n geometry\n The geometry of a basin.\n geo_crs\n The CRS of the input geometry, defaults to epsg:4326.\n\n Returns\n -------\n xr.Dataset\n an xarray dataset in the boundary of the geometry\n\n \"\"\"\n\n ds_dims = (\"y\", \"x\")\n transform, width, height = geoutils.pygeoutils._get_transform(clm_ds, ds_dims)\n _geometry = geoutils.pygeoutils._geo2polygon(geometry, geo_crs, clm_ds.crs)\n\n _mask = rio_features.geometry_mask(\n [_geometry], (height, width), transform, invert=True\n )\n # x - column, y - row\n y_idx, x_idx = np.where(_mask)\n y_idx_min = y_idx.min()\n y_idx_max = y_idx.max()\n x_idx_min = x_idx.min()\n x_idx_max = x_idx.max()\n _mask_bound = np.full(_mask.shape, False)\n _mask_bound[y_idx_min : y_idx_max + 1, x_idx_min : x_idx_max + 1] = True\n\n coords = {\n ds_dims[0]: clm_ds.coords[ds_dims[0]],\n ds_dims[1]: clm_ds.coords[ds_dims[1]],\n }\n mask_bound = xr.DataArray(_mask_bound, coords, dims=ds_dims)\n\n ds_bound_masked = clm_ds.where(mask_bound, drop=True)\n ds_bound_masked.attrs[\"transform\"] = transform\n return ds_bound_masked\n\n\ndef resample_nc(clm_ds: xr.Dataset, resample_size: Union[int, float]) -> xr.Dataset:\n \"\"\"\n Resample the dataset to the resample_size\n\n Because Daymet's 
resolution is 1km which means each grid is 1km * 1km in a x-y coordinate system,\n we think it's enough to use general regrid methods such as interpolate functions in scipy.\n\n Parameters\n ----------\n clm_ds\n the original xarray dataset\n resample_size\n the ratio of resampled dataset's resolution to the original dataset's\n\n Returns\n -------\n xr.Dataset\n the resampled dataset\n\n \"\"\"\n\n if resample_size > 1:\n # coarsen the original values\n ds = (\n clm_ds.coarsen(x=resample_size, boundary=\"pad\")\n .mean()\n .coarsen(y=resample_size, boundary=\"pad\")\n .mean()\n )\n else:\n ydim, xdim = (\"y\", \"x\")\n height, width = clm_ds.sizes[ydim], clm_ds.sizes[xdim]\n left, right = clm_ds[xdim].min().item(), clm_ds[xdim].max().item()\n bottom, top = clm_ds[ydim].min().item(), clm_ds[ydim].max().item()\n\n x_res = abs(left - right) / (width - 1)\n y_res = abs(top - bottom) / (height - 1)\n # interpolate the original values to the new resolution\n x_res_new = x_res * resample_size\n y_res_new = y_res * resample_size\n # the array is in a left-close-right-open range, so right + x_res\n new_x = np.arange(left, right + x_res, x_res_new)\n # the sequence of y is large -> small, for example, [941, 940, 939, ...]\n new_y = np.arange(bottom, top + y_res, y_res_new)[::-1]\n # we extrapolate some out-range values\n ds = clm_ds.interp(x=new_x, y=new_y, kwargs={\"fill_value\": \"extrapolate\"})\n return ds\n\n\ndef trans_daymet_to_camels_format(\n daymet_dir: str, output_dir: str, gage_dict: dict, region: str, year: int\n):\n \"\"\"\n Transform forcing data of daymet downloaded from GEE to the format in CAMELS.\n\n The GEE code used to generate the original data can be seen here:\n https://code.earthengine.google.com/e910596013b5b90cb9c800d17a54a2b3\n If you can read Chinese, and prefer Python code, you can see here:\n https://github.com/OuyangWenyu/hydroGIS/blob/master/GEE/4-geepy-gallery.ipynb\n\n Parameters\n ----------\n daymet_dir\n the original data's directory\n output_dir\n the transformed data's directory\n gage_dict\n a dict containing gage's ids and the correspond HUC02 ids\n region\n we named the file downloaded from GEE as daymet__mean_.csv,\n because we use GEE code to generate data for each year for each shape file (region) containing some basins.\n For example, if we use the basins' shpfile in CAMELS, the region is \"camels\".\n year\n we use GEE code to generate data for each year, so each year for each region has one data file.\n Returns\n -------\n None\n \"\"\"\n\n name_dataset = [\n \"gage_id\",\n \"time_start\",\n \"dayl\",\n \"prcp\",\n \"srad\",\n \"swe\",\n \"tmax\",\n \"tmin\",\n \"vp\",\n ]\n camels_index = [\n \"Year\",\n \"Mnth\",\n \"Day\",\n \"Hr\",\n \"dayl(s)\",\n \"prcp(mm/day)\",\n \"srad(W/m2)\",\n \"swe(mm)\",\n \"tmax(C)\",\n \"tmin(C)\",\n \"vp(Pa)\",\n ]\n\n if \"STAID\" in gage_dict:\n gage_id_key = \"STAID\"\n elif \"gauge_id\" in gage_dict:\n gage_id_key = \"gauge_id\"\n elif \"gage_id\" in gage_dict:\n gage_id_key = \"gage_id\"\n else:\n raise NotImplementedError(\"No such gage id name\")\n\n if \"HUC02\" in gage_dict:\n huc02_key = \"HUC02\"\n elif \"huc_02\" in gage_dict:\n huc02_key = \"huc_02\"\n else:\n raise NotImplementedError(\"No such huc02 id\")\n\n for f_name in os.listdir(daymet_dir):\n if fnmatch.fnmatch(f_name, f\"daymet_{region}_mean_{year}.csv\"):\n data_file = os.path.join(daymet_dir, f_name)\n # because this func only works for one region and one year, it means it only works for one file once\n # Hence, when we find the file and 
transform it, just finish\n break\n data_temp = pd.read_csv(data_file, sep=\",\", dtype={name_dataset[0]: str})\n for i_basin in range(len(gage_dict[gage_id_key])):\n # name csv\n # if type is not str, may not find the data\n assert type(gage_dict[gage_id_key][i_basin]) == str\n basin_data = data_temp[\n data_temp[name_dataset[0]] == gage_dict[gage_id_key][i_basin]\n ]\n if basin_data.shape[0] == 0:\n raise ArithmeticError(\"Such chosen basins have no data\")\n # get Year,Month,Day,Hour info\n csv_date = pd.to_datetime(basin_data[name_dataset[1]])\n # the hour is set to 12, as 12 is the average hour of a day\n year_month_day_hour = pd.DataFrame(\n [[dt.year, dt.month, dt.day, 12] for dt in csv_date],\n columns=camels_index[:4],\n )\n data_df = pd.DataFrame(basin_data.iloc[:, 2:].values, columns=camels_index[4:])\n # concat\n new_data_df = pd.concat([year_month_day_hour, data_df], axis=1)\n # output the result\n huc_id = gage_dict[huc02_key][i_basin]\n output_huc_dir = os.path.join(output_dir, huc_id)\n if not os.path.isdir(output_huc_dir):\n os.makedirs(output_huc_dir)\n output_file = os.path.join(\n output_huc_dir, gage_dict[gage_id_key][i_basin] + \"_lump_daymet_forcing.txt\"\n )\n print(\"output forcing data of\", gage_dict[gage_id_key][i_basin], \"year\", year)\n if os.path.isfile(output_file):\n data_old = pd.read_csv(output_file, sep=\" \")\n years = np.unique(data_old[camels_index[0]].values)\n if year in years:\n continue\n os.remove(output_file)\n new_data_df = pd.concat([data_old, new_data_df]).sort_values(\n by=camels_index[:3]\n )\n new_data_df.to_csv(\n output_file, header=True, index=False, sep=\" \", float_format=\"%.2f\"\n )\n\n\ndef insert_daymet_value_in_leap_year(data_dir: str, t_range: list = None):\n \"\"\"\n interpolation for the 12.31 data in leap year\n\n Parameters\n ----------\n data_dir\n the transformed but not inserted data's directory\n t_range\n the time range to insert, the default range is [\"1980-01-01\", \"2020-01-01\"]\n\n Returns\n -------\n None\n \"\"\"\n\n if t_range is None:\n t_range = [\"1980-01-01\", \"2020-01-01\"]\n subdir_str = os.listdir(data_dir)\n col_lst = [\n \"dayl(s)\",\n \"prcp(mm/day)\",\n \"srad(W/m2)\",\n \"swe(mm)\",\n \"tmax(C)\",\n \"tmin(C)\",\n \"vp(Pa)\",\n ]\n for i in range(len(subdir_str)):\n subdir = os.path.join(data_dir, subdir_str[i])\n path_list = os.listdir(subdir)\n path_list.sort()\n for filename in path_list:\n data_file = os.path.join(subdir, filename)\n is_leap_file_name = data_file[-8:]\n if \"leap\" in is_leap_file_name:\n continue\n print(\"reading\", data_file)\n data_temp = pd.read_csv(data_file, sep=r\"\\s+\")\n data_temp.rename(columns={\"Mnth\": \"Month\"}, inplace=True)\n df_date = data_temp[[\"Year\", \"Month\", \"Day\"]]\n date = pd.to_datetime(df_date).values.astype(\"datetime64[D]\")\n # daymet file not for leap year, there is no data in 12.31 in leap year\n assert all(x < y for x, y in zip(date, date[1:]))\n t_range_list = t_range_days(t_range)\n [c, ind1, ind2] = np.intersect1d(date, t_range_list, return_indices=True)\n # assert date[0] <= t_range_list[0] and date[-1] >= t_range_list[-1]\n nt = t_range_list.size\n out = np.full([nt, 7], np.nan)\n out[ind2, :] = data_temp[col_lst].values[ind1]\n x = pd.DataFrame(out, columns=col_lst)\n x_intepolate = x.interpolate(\n method=\"linear\", limit_direction=\"forward\", axis=0\n )\n csv_date = pd.to_datetime(t_range_list)\n year_month_day_hour = pd.DataFrame(\n [[dt.year, dt.month, dt.day, dt.hour] for dt in csv_date],\n columns=[\"Year\", \"Mnth\", 
\"Day\", \"Hr\"],\n )\n # concat\n new_data_df = pd.concat([year_month_day_hour, x_intepolate], axis=1)\n output_file = data_file[:-4] + \"_leap.txt\"\n new_data_df.to_csv(\n output_file, header=True, index=False, sep=\" \", float_format=\"%.2f\"\n )\n os.remove(data_file)\n", "repo_name": "OuyangWenyu/CatchmentForcings", "sub_path": "catchmentforcings/daymet4basins/basin_daymet_process.py", "file_name": "basin_daymet_process.py", "file_ext": "py", "file_size_in_byte": 17876, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "typing.Union", "line_number": 23, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 23, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 23, "usage_type": "name"}, {"api_name": "typing.Union", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 24, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 26, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 26, "usage_type": "name"}, {"api_name": "pydaymet.core.Daymet", "line_number": 73, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils._geo2polygon", "line_number": 81, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils", "line_number": 81, "usage_type": "attribute"}, {"api_name": "pydaymet.InvalidInputRange", "line_number": 84, "usage_type": "call"}, {"api_name": "pydaymet.pydaymet._gridded_urls", "line_number": 87, "usage_type": "call"}, {"api_name": "xarray.open_mfdataset", "line_number": 97, "usage_type": "call"}, {"api_name": "io.BytesIO", "line_number": 99, "usage_type": "call"}, {"api_name": "async_retriever.retrieve", "line_number": 100, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils._get_transform", "line_number": 134, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils", "line_number": 134, "usage_type": "attribute"}, {"api_name": "xarray.Dataset", "line_number": 138, "usage_type": "attribute"}, {"api_name": "pydaymet.pydaymet._xarray_geomask", "line_number": 143, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 30, "usage_type": "attribute"}, {"api_name": "xarray.Dataset", "line_number": 149, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 149, "usage_type": "name"}, {"api_name": "numpy.sort", "line_number": 171, "usage_type": "call"}, {"api_name": "pydaymet.core._check_requirements", "line_number": 176, "usage_type": "call"}, {"api_name": "py3dep.elevation_bygrid", "line_number": 181, "usage_type": "call"}, {"api_name": "xarray.merge", "line_number": 183, "usage_type": "call"}, {"api_name": "numpy.isnan", "line_number": 186, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 191, "usage_type": "call"}, {"api_name": "numpy.pi", "line_number": 199, "usage_type": "attribute"}, {"api_name": "catchmentforcings.pet.pet4daymet.pm_fao56", "line_number": 206, "usage_type": "call"}, {"api_name": "catchmentforcings.pet.pet4daymet.priestley_taylor", "line_number": 211, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 150, "usage_type": "attribute"}, {"api_name": "xarray.Dataset", "line_number": 223, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 224, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 
224, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 224, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 224, "usage_type": "name"}, {"api_name": "pydaymet.pydaymet._xarray_geomask", "line_number": 245, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 246, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 226, "usage_type": "attribute"}, {"api_name": "xarray.Dataset", "line_number": 253, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 254, "usage_type": "name"}, {"api_name": "shapely.geometry.Polygon", "line_number": 254, "usage_type": "name"}, {"api_name": "shapely.geometry.MultiPolygon", "line_number": 254, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 254, "usage_type": "name"}, {"api_name": "pygeoutils.pygeoutils._get_transform", "line_number": 277, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils", "line_number": 277, "usage_type": "attribute"}, {"api_name": "pygeoutils.pygeoutils._geo2polygon", "line_number": 278, "usage_type": "call"}, {"api_name": "pygeoutils.pygeoutils", "line_number": 278, "usage_type": "attribute"}, {"api_name": "rasterio.features.geometry_mask", "line_number": 280, "usage_type": "call"}, {"api_name": "rasterio.features", "line_number": 280, "usage_type": "name"}, {"api_name": "numpy.where", "line_number": 284, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 289, "usage_type": "call"}, {"api_name": "xarray.DataArray", "line_number": 296, "usage_type": "call"}, {"api_name": "xarray.Dataset", "line_number": 256, "usage_type": "attribute"}, {"api_name": "xarray.Dataset", "line_number": 303, "usage_type": "attribute"}, {"api_name": "typing.Union", "line_number": 303, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 344, "usage_type": "call"}, {"api_name": "numpy.arange", "line_number": 346, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 423, "usage_type": "call"}, {"api_name": "fnmatch.fnmatch", "line_number": 424, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 425, "usage_type": "call"}, {"api_name": "os.path", "line_number": 425, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 429, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 440, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 442, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 446, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 448, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 451, "usage_type": "call"}, {"api_name": "os.path", "line_number": 451, "usage_type": "attribute"}, {"api_name": "os.path.isdir", "line_number": 452, "usage_type": "call"}, {"api_name": "os.path", "line_number": 452, "usage_type": "attribute"}, {"api_name": "os.makedirs", "line_number": 453, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 454, "usage_type": "call"}, {"api_name": "os.path", "line_number": 454, "usage_type": "attribute"}, {"api_name": "os.path.isfile", "line_number": 458, "usage_type": "call"}, {"api_name": "os.path", "line_number": 458, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 459, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 460, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 463, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 464, "usage_type": 
"call"}, {"api_name": "os.listdir", "line_number": 490, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 501, "usage_type": "call"}, {"api_name": "os.path", "line_number": 501, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 502, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 505, "usage_type": "call"}, {"api_name": "os.path", "line_number": 505, "usage_type": "attribute"}, {"api_name": "pandas.read_csv", "line_number": 510, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 513, "usage_type": "call"}, {"api_name": "catchmentforcings.utils.hydro_utils.t_range_days", "line_number": 516, "usage_type": "call"}, {"api_name": "numpy.intersect1d", "line_number": 517, "usage_type": "call"}, {"api_name": "numpy.full", "line_number": 520, "usage_type": "call"}, {"api_name": "numpy.nan", "line_number": 520, "usage_type": "attribute"}, {"api_name": "pandas.DataFrame", "line_number": 522, "usage_type": "call"}, {"api_name": "pandas.to_datetime", "line_number": 526, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 527, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 532, "usage_type": "call"}, {"api_name": "os.remove", "line_number": 537, "usage_type": "call"}]} +{"seq_id": "20366559554", "text": "import requests\nimport json\nimport sys\nfrom bs4 import BeautifulSoup\nfrom casos import casos_positivos, casos_fallecidos\n\n\npoblacion_total = 33028673\n\n#!total_positivos: esto me obtiene una tupla de filas y columnas, por eso lo convierto en un array\ntotal_positivos = list(casos_positivos.shape)[0]\n#!total_positivos_hombres\ntotal_positivos_hombres = list(casos_positivos[casos_positivos['SEXO'] == \"MASCULINO\"].shape)[0]\n\n#!total_positivos_mujeres\ntotal_positivos_mujeres = list(casos_positivos[casos_positivos['SEXO'] == \"FEMENINO\"].shape)[0]\n\n\n#!total_fallecidos:\ntotal_fallecidos = list(casos_fallecidos.shape)[0]\n\n#!total_fallecidos_hombre:\ntotal_fallecidos_hombres = list(\n casos_fallecidos[casos_fallecidos['SEXO'] == \"MASCULINO\"].shape)[0]\n\n#!total_fallecidos_mujeres:\ntotal_fallecidos_mujeres = list(\n casos_fallecidos[casos_fallecidos['SEXO'] == \"FEMENINO\"].shape)[0]\n\n\n\n#!total_poblacion:\n# total_poblacion = poblacion_total[poblacion_total['Unnamed: 1'] == \"PERU\"].iloc[0,2]\ntotal_poblacion = poblacion_total\n#!positivos departamento loreto\npositivos_loreto = list(casos_positivos[casos_positivos['DEPARTAMENTO'] == \"LORETO\"].shape)[0]\n\n#!positivos departamento amazonas\npositivos_amazonas = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"AMAZONAS\"].shape)[0]\n\n\n#!positivos departamento tumbes\npositivos_tumbes = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"TUMBES\"].shape)[0]\n\n\n#!positivos departamento piura\npositivos_piura = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"PIURA\"].shape)[0]\n\n\n#!positivos departamento lambayeque\npositivos_lambayeque = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"LAMBAYEQUE\"].shape)[0]\n\n\n#!positivos departamento Cajamarca\npositivos_cajamarca = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"CAJAMARCA\"].shape)[0]\n\n\n\n#!positivos departamento La Libertad\npositivos_libertad = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"LA LIBERTAD\"].shape)[0]\n\n\n#!positivos departamento ancash\npositivos_ancash = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"ANCASH\"].shape)[0]\n\n#!positivos departamento 
san martin\npositivos_sanmartin = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"SAN MARTIN\"].shape)[0]\n\n\n\n#!positivos departamento huanuco\npositivos_huanuco = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"HUANUCO\"].shape)[0]\n\n\n\n#!positivos departamento ucayali\npositivos_ucayali = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"UCAYALI\"].shape)[0]\n\n\n\n#!positivos departamento pasco\npositivos_pasco = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"PASCO\"].shape)[0]\n\n\n\n#!positivos departamento lima\npositivos_lima = list(\n casos_positivos[(casos_positivos['DEPARTAMENTO'] == \"LIMA\") | (casos_positivos['DEPARTAMENTO'] == \"LIMA REGION\")].shape)[0]\n\n\n#!positivos departamento junin\npositivos_junin = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"JUNIN\"].shape)[0]\n\n\n\n#!positivos departamento huancavelica\npositivos_huancavelica = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"HUANCAVELICA\"].shape)[0]\n\n\n\n#!positivos departamento ica\npositivos_ica = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"ICA\"].shape)[0]\n\n\n#!positivos departamento ayacucho\npositivos_ayacucho = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"AYACUCHO\"].shape)[0]\n\n#!positivos departamento apurimac\npositivos_apurimac = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"APURIMAC\"].shape)[0]\n\n\n#!positivos departamento cusco\npositivos_cusco = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"CUSCO\"].shape)[0]\n\n#!positivos departamento madre de dios\npositivos_madrededios = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"MADRE DE DIOS\"].shape)[0]\n\n\n#!positivos departamento puno\npositivos_puno = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"PUNO\"].shape)[0]\n\n#!positivos departamento arequipa\npositivos_arequipa = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"AREQUIPA\"].shape)[0]\n\n\n#!positivos departamento moquegua\npositivos_moquegua = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"MOQUEGUA\"].shape)[0]\n\n\n#!positivos departamento tacna\npositivos_tacna = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"TACNA\"].shape)[0]\n\n#!positivos departamento callao\n\npositivos_callao = list(\n casos_positivos[casos_positivos['DEPARTAMENTO'] == \"CALLAO\"].shape)[0]\n\n#!Fallecidos por etapa de vida\n#!0 a 5 años (Primera infancia o Bebes o Infancia Temprana)\nfallecidos_preinfancia = list(casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 0) & (\n casos_fallecidos['EDAD_DECLARADA'] <= 5)].shape)[0]\n\n#!6 a 11 años (Infancia o Niños o Niñez)\nfallecidos_infancia = list(casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 6) & (\n casos_fallecidos['EDAD_DECLARADA'] <= 11)].shape)[0]\n\n#!12 a 18 años (Adolescencia o Adolescentes)\nfallecidos_adolescencia = list(casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 12) & (\n casos_fallecidos['EDAD_DECLARADA'] <= 18)].shape)[0]\n\n#!19 a 26 años (Juventud o Jóvenes)\nfallecidos_juventud = list(casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 19) & (\n casos_fallecidos['EDAD_DECLARADA'] <= 26)].shape)[0]\n\n#!27 a 59 años (Adultez o Adultos)\nfallecidos_adultez = list(casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 27) & (\n casos_fallecidos['EDAD_DECLARADA'] <= 59)].shape)[0]\n\n#!60 a mas (Persona Mayor o Ancianos)\nfallecidos_persona_mayor = list(\n 
casos_fallecidos[(casos_fallecidos['EDAD_DECLARADA'] >= 60)].shape)[0]\n\ncasos_generales = {\n \"name\": \"peru\",\n \"poblacion\": total_poblacion,\n \"positivos\": total_positivos,\n \"hombres_infectados\": total_positivos_hombres,\n \"mujeres_infectados\": total_positivos_mujeres,\n \"fallecidos\": total_fallecidos,\n \"hombres_fallecidos\": total_fallecidos_hombres,\n \"mujeres_fallecidos\": total_fallecidos_mujeres,\n \"etapa_de_vida_fallecidos\": {\n \"primera_infancia\": fallecidos_preinfancia,\n \"infancia\": fallecidos_infancia,\n \"adolescencia\": fallecidos_adolescencia,\n \"juventud\": fallecidos_juventud,\n \"adultez\": fallecidos_adultez,\n \"persona_mayor\": fallecidos_persona_mayor\n },\n \"mapa_hijos\": [\n positivos_amazonas,\n positivos_ancash,\n positivos_apurimac,\n positivos_arequipa,\n positivos_ayacucho,\n positivos_cajamarca,\n positivos_callao, \n positivos_cusco,\n positivos_huancavelica,\n positivos_huanuco,\n positivos_ica,\n positivos_junin,\n positivos_libertad,\n positivos_lambayeque,\n positivos_lima,\n positivos_loreto,\n positivos_madrededios,\n positivos_moquegua,\n positivos_pasco,\n positivos_piura,\n positivos_puno,\n positivos_sanmartin,\n positivos_tacna,\n positivos_tumbes,\n positivos_ucayali\n ]\n}\n\nprint(json.dumps(casos_generales));\nsys.stdout.flush();\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n", "repo_name": "vqc1909a/COVID19Peru-Backend", "sub_path": "python/peru.py", "file_name": "peru.py", "file_ext": "py", "file_size_in_byte": 7102, "program_lang": "python", "lang": "pt", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "casos.casos_positivos.shape", "line_number": 11, "usage_type": "attribute"}, {"api_name": "casos.casos_positivos", "line_number": 11, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 13, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 16, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos.shape", "line_number": 20, "usage_type": "attribute"}, {"api_name": "casos.casos_fallecidos", "line_number": 20, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 24, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 28, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 36, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 40, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 45, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 50, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 55, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 60, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 66, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 71, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 75, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 81, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 87, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 93, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 99, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 104, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 110, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 
116, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 121, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 125, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 130, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 134, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 139, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 143, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 148, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 153, "usage_type": "name"}, {"api_name": "casos.casos_positivos", "line_number": 158, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 162, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 163, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 166, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 167, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 170, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 171, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 174, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 175, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 178, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 179, "usage_type": "name"}, {"api_name": "casos.casos_fallecidos", "line_number": 183, "usage_type": "name"}, {"api_name": "json.dumps", "line_number": 231, "usage_type": "call"}, {"api_name": "sys.stdout.flush", "line_number": 232, "usage_type": "call"}, {"api_name": "sys.stdout", "line_number": 232, "usage_type": "attribute"}]} +{"seq_id": "4041428370", "text": "# Phillip LeClair\n# CS 5001\n# Final Project\n# 12/10/2023\n# testApp.py executes a series of tests to ensure functionality of the project\n\n\n# import unittest to create the testing framework\nimport unittest\n\n# import os to check file size\nimport os\nfrom datetime import date\nfrom os.path import getsize\n\n# import Driver to execute the webdriver\nfrom driver import Driver\n\n# import DataService to send API requests to Google Sheets\nfrom dataService import DataService\n\nclass TestDriver(unittest.TestCase):\n ''' TestDriver class creates a series of tests\n Parameters: unnittest.TestCase\n '''\n def test_data_service(self):\n ''' test_data_service tests whether Google Sheets API is returning data\n Parameters: None\n Returns nothing\n '''\n spreadsheet_id='1ZbTHgQc5p61oDLcAHoj2iAWcfgYXJJibnCFMo9boShI'\n service = DataService(spreadsheet_id)\n data = service.getData()\n # check to see if the sheet has data\n self.assertTrue(len(data['values']) > 0, msg='Returned sheet is empty!')\n def test_driver(self):\n ''' test_driver tests whether the webdriver scrapes listings correctly & saves a valid screenshot\n Parameters: None\n Returns nothing\n '''\n driver = Driver()\n\n tenant = 'Aspen Valley Hospital'\n city_state = 'Aspen, CO'\n specialty = 'Medical'\n # title = 'Doctor' -- optional\n results = driver.drive(tenant, city_state, specialty)\n\n path = os.getcwd() + f'/images/{tenant}'\n today = str(date.today())\n img_path = f'{path}/3pack - {today}.png'\n img_size = getsize(img_path) / 1000\n height, width = results['screenshot_size']\n driver.quit()\n # check to see if listing dimensions are valid:\n 
self.assertTrue(results['width'] > 0 and results['height'] > 0, msg='Listing dimensions are invalid')\n        # check to see if screenshot dimensions are valid:\n        self.assertTrue(height > 0 and width > 0, msg='Screenshot dimensions are invalid')\n        # check to see if screenshot image size is valid (greater than 10kb):\n        self.assertTrue(img_size > 10, msg='Screenshot file size is invalid (<10kb)!')\n\n\ndef main():\n    ''' Main function executes the tests\n    Parameters: None\n    Returns nothing\n    '''\n    unittest.main(verbosity=3)\n\n\nif __name__ == \"__main__\":\n    main()\n", "repo_name": "pjleclair/threePackTool", "sub_path": "testApp.py", "file_name": "testApp.py", "file_ext": "py", "file_size_in_byte": 2378, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "unittest.TestCase", "line_number": 22, "usage_type": "attribute"}, {"api_name": "dataService.DataService", "line_number": 32, "usage_type": "call"}, {"api_name": "driver.Driver", "line_number": 41, "usage_type": "call"}, {"api_name": "driver.drive", "line_number": 47, "usage_type": "call"}, {"api_name": "os.getcwd", "line_number": 49, "usage_type": "call"}, {"api_name": "datetime.date.today", "line_number": 50, "usage_type": "call"}, {"api_name": "datetime.date", "line_number": 50, "usage_type": "name"}, {"api_name": "os.path.getsize", "line_number": 52, "usage_type": "call"}, {"api_name": "driver.quit", "line_number": 54, "usage_type": "call"}, {"api_name": "unittest.main", "line_number": 68, "usage_type": "call"}]} +{"seq_id": "3442989528", "text": "#!/usr/bin/env python3\n\nimport soundfile as sf\nfrom sys import argv\nfrom pedalboard import (\n    Pedalboard,\n    Compressor,\n    Convolution,\n    Chorus,\n    Gain,\n    Reverb,\n    Limiter,\n    LadderFilter,\n    Phaser,\n)\nimport time, webbrowser\n\nfileName = argv[1] if len(argv) > 1 else 'short-demo-song.wav'\n\nprint('reading soundfile %s...' 
% (fileName))\n\naudio, sample_rate = sf.read(fileName)\n\nnewFileName = 'short-demo-song-processed' + str(int(time.time())) + '.wav'\n\nprint('setting up board...')\n\n# Make a Pedalboard object, containing multiple plugins:\nboard = Pedalboard([\n    Compressor(threshold_db=-24, ratio=25),\n    Gain(gain_db=0.3),\n    Chorus(rate_hz=0.2, mix=0.2, depth=0.1),\n    Convolution('crash.wav', 0.6),\n    LadderFilter(mode=LadderFilter.Mode.HPF12, cutoff_hz=300),\n    # Phaser(),\n    Reverb(room_size=0.3),\n], sample_rate=sample_rate)\n\nprint('board set up!')\n\n# Pedalboard objects behave like lists, so you can add plugins:\nboard.append(Compressor(threshold_db=-25, ratio=10))\n# board.append(Gain(gain_db=1))\nboard.append(Limiter())\n\nprint('running audio through pedalboard...')\n\n# Run the audio through this pedalboard!\neffected = board(audio)\n\nprint('writing back as wav file...')\n\n# Write the audio back as a wav file:\nwith sf.SoundFile(newFileName, 'w', samplerate=sample_rate, channels=len(effected.shape)) as f:\n    f.write(effected)\n\nprint('done!')\n\nchrome_path = 'open -a /Applications/Google\\ Chrome.app %s'\n\nwebbrowser.get(chrome_path).open(newFileName)", "repo_name": "jonathanfann/jfx", "sub_path": "demo.py", "file_name": "demo.py", "file_ext": "py", "file_size_in_byte": 1497, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "sys.argv", "line_number": 18, "usage_type": "name"}, {"api_name": "soundfile.read", "line_number": 22, "usage_type": "call"}, {"api_name": "time.time", "line_number": 24, "usage_type": "call"}, {"api_name": "pedalboard.Pedalboard", "line_number": 29, "usage_type": "call"}, {"api_name": "pedalboard.Compressor", "line_number": 30, "usage_type": "call"}, {"api_name": "pedalboard.Gain", "line_number": 31, "usage_type": "call"}, {"api_name": "pedalboard.Chorus", "line_number": 32, "usage_type": "call"}, {"api_name": "pedalboard.Convolution", "line_number": 33, "usage_type": "call"}, {"api_name": "pedalboard.LadderFilter", "line_number": 34, "usage_type": "call"}, {"api_name": "pedalboard.LadderFilter.Mode", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pedalboard.Reverb", "line_number": 36, "usage_type": "call"}, {"api_name": "pedalboard.Compressor", "line_number": 42, "usage_type": "call"}, {"api_name": "pedalboard.Limiter", "line_number": 44, "usage_type": "call"}, {"api_name": "soundfile.SoundFile", "line_number": 54, "usage_type": "call"}, {"api_name": "webbrowser.get", "line_number": 61, "usage_type": "call"}]} +{"seq_id": "3449354118", "text": "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Feb 7 15:00:33 2020\r\n\r\n@author: marta\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mlxtend.preprocessing import TransactionEncoder\r\nfrom mlxtend.frequent_patterns import apriori\r\nfrom mlxtend.frequent_patterns import association_rules\r\nfrom kmodes.kmodes import KModes\r\nimport sklearn.cluster as cluster\r\nimport sklearn.neighbors as neighbors\r\nimport math\r\nfrom numpy import linalg as LA\r\n\r\n#QUESTION 1\r\ndata= pd.read_csv(\"Groceries.csv\");\r\n#a)\r\ndf = data.groupby('Customer')['Item'].nunique()\r\n\r\n\r\nhist=plt.hist(df)\r\nplt.xlabel('Number of different items')\r\nplt.ylabel('Number of clients')\r\nplt.savefig('hist_numitems.png')\r\nplt.show()\r\n\r\np_25 = np.percentile(df,25)\r\nprint('The 25th percentile is :'+str(p_25)) \r\np_50 = np.percentile(df, 50)\r\nprint('The 50th percentile or median is 
:'+str(p_50)) \r\np_75 = np.percentile(df, 75)\r\nprint('The 75th percentile is :'+str(p_75)) \r\n\r\n#b)\r\nNx=75;\r\nN=df.size;\r\nsupport=(Nx/N);\r\nprint('The support is:'+str(support*100))\r\nListItem = data.groupby(['Customer'])['Item'].apply(list).values.tolist() \r\nte = TransactionEncoder()\r\nte_ary = te.fit(ListItem).transform(ListItem)\r\ntrainData = pd.DataFrame(te_ary, columns=te.columns_) # Item List -> Item Indicator\r\n\r\nfrequent_itemsets = apriori(trainData, min_support = support ,use_colnames = True)\r\nlength = frequent_itemsets.support.size\r\nprint('The number of frequent itemsets is: ' , length )\r\nprint('Itemset with most items: ' , frequent_itemsets.iloc[length-1]['itemsets'])\r\n\r\n#c)\r\n\r\nassoc_rules = association_rules(frequent_itemsets, metric = \"confidence\", min_threshold = 0.01)\r\nprint('The number of association rules is: ', assoc_rules['confidence'].count())\r\n\r\n#d)\r\nsupport_rule = assoc_rules['support'].to_numpy()\r\nconfidence = assoc_rules['confidence'].to_numpy()\r\nlift = assoc_rules['lift'].to_numpy()\r\nprint('The size of the marker is:',lift.shape[0])\r\n\r\nplt.scatter(confidence,support_rule, alpha=0.7 , s=lift)\r\nplt.xlabel('Confidence')\r\nplt.ylabel('Support')\r\nplt.title('Support vs Confidence')\r\nplt.savefig('support_confidence.png')\r\n\r\n#e)\r\nassoc_rules = association_rules(frequent_itemsets, metric = \"confidence\", min_threshold = 0.6)\r\nprint(assoc_rules)\r\n\r\n#QUESTION 2\r\ndf_2=pd.read_csv(\"cars.csv\")\r\n#a)\r\ntype_freq = df_2.groupby('Type')['Type'].count()\r\nprint( 'The frequency of Types is:',type_freq)\r\n#b)\r\nDriveTrain_freq = df_2.groupby('DriveTrain')['DriveTrain'].count()\r\nprint( 'The frequencies of DriveTrain are:',DriveTrain_freq)\r\n\r\n#c)\r\nOrigin_freq = df_2.groupby('Origin')['Origin'].count()\r\nprint( 'The frequencies of Origin is:',Origin_freq)\r\nd_Asia_Europe=(1/Origin_freq['Asia'])+(1/Origin_freq['Europe'])\r\nprint('The distance between Asia and Europe is:'+str(d_Asia_Europe))\r\n\r\n#d)\r\n\r\ndf_na = df_2.fillna(\"Missing\")\r\ncyl_freq = df_na.groupby('Cylinders')['Cylinders'].count()\r\nprint( 'The frequencies of Cylinder is:',cyl_freq)\r\nd_5_missing=(1/cyl_freq[5])+(1/cyl_freq['Missing'])\r\nprint('The distance between Cylinder 5 and Missing is:'+str(d_5_missing))\r\n\r\n#e)\r\ndf_na = df_2.fillna(0)\r\ndf1 = df_na[['Type','DriveTrain','Origin','Cylinders']]\r\ndf1[\"Cylinders\"] = df1[\"Cylinders\"].astype('category')\r\nkm = KModes(n_clusters=3, init='Huang')\r\nclusters = km.fit_predict(df1)\r\n\r\nunique, counts = np.unique(clusters, return_counts=True)\r\nprint('The number of observations in each cluster is:',counts)\r\n\r\n# Print the cluster centroids\r\nprint(km.cluster_centroids_)\r\n#f)\r\nclusters=pd.DataFrame(clusters)\r\ndata = pd.concat([df1, clusters],axis=1)\r\ndata.columns = ['Type', 'DriveTrain','Origin','Cylinders','Cluster']\r\norigin_freq = data.groupby('Cluster').Origin.value_counts()\r\n\r\n\r\n\r\n\r\n#QUESTION 3\r\ndata_3= pd.read_csv(\"FourCircle.csv\");\r\nrdm_state=60616;\r\nplt.scatter(data_3.x,data_3.y)\r\nplt.title('Scatter plot')\r\nplt.savefig('scatter.png')\r\n\r\n#b)\r\nx = np.array(data_3['x'])\r\ny = np.array(data_3['y'])\r\nX = np.column_stack((x,y))\r\nkmeans = cluster.KMeans(n_clusters=4, random_state=rdm_state).fit(X)\r\nprint(\"Cluster Assignment:\", kmeans.labels_)\r\nprint(\"Cluster Centroid 0:\", kmeans.cluster_centers_[0])\r\nprint(\"Cluster Centroid 1:\", kmeans.cluster_centers_[1])\r\nprint(\"Cluster Centroid 2:\", 
kmeans.cluster_centers_[2])\r\nprint(\"Cluster Centroid 3:\", kmeans.cluster_centers_[3])\r\n\r\nplt.scatter(x, y, c=kmeans.labels_.astype(float))\r\nplt.xlabel('x')\r\nplt.ylabel('y')\r\nplt.title('Scatter plot before K-mean clustering')\r\nplt.show()\r\nplt.savefig('scatter_KMeans.png')\r\n\r\n\r\n#c)\r\n\r\nn_neigh = 10\r\nkNNSpec =neighbors.NearestNeighbors(n_neighbors = n_neigh, algorithm = 'brute', metric = 'euclidean')\r\nnbrs = kNNSpec.fit(X)\r\nd3, i3 = nbrs.kneighbors(X)\r\nprint('Distance to the nearest neighbors: '+str(d3))\r\nprint('Which are the nearest neihbors: '+str(i3))\r\n\r\n# Retrieve the distances among the observations\r\ndistObject =neighbors.DistanceMetric.get_metric('euclidean')\r\ndistances = distObject.pairwise(X)\r\n\r\nnObs = 1440\r\n\r\n# Create the Adjacency matrix\r\nAdjacency = np.zeros((nObs, nObs))\r\nfor i in range(nObs):\r\n for j in i3[i]:\r\n Adjacency[i,j] = math.exp(- (distances[i][j])**2 )\r\n\r\n# Make the Adjacency matrix symmetric\r\nAdjacency = 0.5 * (Adjacency + Adjacency.transpose())\r\n\r\n# Create the Degree matrix\r\nDegree = np.zeros((nObs, nObs))\r\nfor i in range(nObs):\r\n sum = 0\r\n for j in range(nObs):\r\n sum += Adjacency[i,j]\r\n Degree[i,i] = sum\r\n\r\n# Create the Laplacian matrix \r\nLmatrix = Degree - Adjacency\r\n\r\n# Obtain the eigenvalues and the eigenvectors of the Laplacian matrix\r\nevals, evecs = LA.eigh(Lmatrix)\r\n\r\n# Series plot of the smallest five eigenvalues to determine the number of clusters\r\nsequence = np.arange(1,6,1) \r\nplt.plot(sequence, evals[0:5,], marker = \"o\")\r\nplt.xlabel('Sequence')\r\nplt.ylabel('Eigenvalue')\r\nplt.xticks(sequence)\r\nplt.grid(\"both\")\r\nplt.show()\r\nprint('The eigenvalues in scientific notation are:',evals[0:5,])\r\n\r\n# Series plot of the smallest twenty eigenvalues to determine the number of neighbors\r\nsequence = np.arange(1,21,1) \r\nplt.plot(sequence, evals[0:20,], marker = \"o\")\r\nplt.xlabel('Sequence')\r\nplt.ylabel('Eigenvalue')\r\nplt.grid(\"both\")\r\nplt.xticks(sequence)\r\nplt.show()\r\n\r\n\r\n#e)\r\nZ = evecs[:,[0,3]]\r\nprint('The eigenvectors are:',Z)\r\n# Final KMeans solution \r\nkmeans_spectral = cluster.KMeans(n_clusters=4, random_state=rdm_state).fit(Z)\r\ndata_3['SpectralCluster'] = kmeans_spectral.labels_\r\n\r\nplt.scatter(data_3['x'], data_3['y'], c = data_3['SpectralCluster'])\r\nplt.xlabel('x')\r\nplt.ylabel('y')\r\nplt.title('Spectral Cluster with {neighbors} neighbors'.format(neighbors = n_neigh))\r\nplt.savefig('Spectral_{neighbors}.png'.format(neighbors = n_neigh))\r\nplt.grid(True)\r\nplt.show()\r\n", "repo_name": "martagruizdeleon/IIT-", "sub_path": "Homework 2.py", "file_name": "Homework 2.py", "file_ext": "py", "file_size_in_byte": 6652, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "pandas.read_csv", "line_number": 21, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.hist", "line_number": 26, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 26, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 27, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 27, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 28, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 28, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 29, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 29, 
"usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 30, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot", "line_number": 30, "usage_type": "name"}, {"api_name": "numpy.percentile", "line_number": 32, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 34, "usage_type": "call"}, {"api_name": "numpy.percentile", "line_number": 36, "usage_type": "call"}, {"api_name": "mlxtend.preprocessing.TransactionEncoder", "line_number": 45, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 47, "usage_type": "call"}, {"api_name": "mlxtend.frequent_patterns.apriori", "line_number": 49, "usage_type": "call"}, {"api_name": "mlxtend.frequent_patterns.association_rules", "line_number": 56, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 65, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 65, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 66, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 66, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 67, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 67, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 68, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 68, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 69, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 69, "usage_type": "name"}, {"api_name": "mlxtend.frequent_patterns.association_rules", "line_number": 72, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 76, "usage_type": "call"}, {"api_name": "kmodes.kmodes.KModes", "line_number": 102, "usage_type": "call"}, {"api_name": "numpy.unique", "line_number": 105, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 111, "usage_type": "call"}, {"api_name": "pandas.concat", "line_number": 112, "usage_type": "call"}, {"api_name": "pandas.read_csv", "line_number": 120, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 122, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 122, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 123, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 123, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 124, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 124, "usage_type": "name"}, {"api_name": "numpy.array", "line_number": 127, "usage_type": "call"}, {"api_name": "numpy.array", "line_number": 128, "usage_type": "call"}, {"api_name": "numpy.column_stack", "line_number": 129, "usage_type": "call"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 130, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 130, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 137, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 137, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 138, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 138, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 139, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 139, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 140, "usage_type": "call"}, 
{"api_name": "matplotlib.pyplot", "line_number": 140, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 141, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 141, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 142, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 142, "usage_type": "name"}, {"api_name": "sklearn.neighbors.NearestNeighbors", "line_number": 148, "usage_type": "call"}, {"api_name": "sklearn.neighbors", "line_number": 148, "usage_type": "name"}, {"api_name": "sklearn.neighbors.DistanceMetric.get_metric", "line_number": 155, "usage_type": "call"}, {"api_name": "sklearn.neighbors.DistanceMetric", "line_number": 155, "usage_type": "attribute"}, {"api_name": "sklearn.neighbors", "line_number": 155, "usage_type": "name"}, {"api_name": "numpy.zeros", "line_number": 161, "usage_type": "call"}, {"api_name": "math.exp", "line_number": 164, "usage_type": "call"}, {"api_name": "numpy.zeros", "line_number": 170, "usage_type": "call"}, {"api_name": "numpy.linalg.eigh", "line_number": 181, "usage_type": "call"}, {"api_name": "numpy.linalg", "line_number": 181, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 184, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 185, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 185, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 186, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 186, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 187, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 187, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 188, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 188, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 189, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 189, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 190, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 190, "usage_type": "name"}, {"api_name": "numpy.arange", "line_number": 194, "usage_type": "call"}, {"api_name": "matplotlib.pyplot.plot", "line_number": 195, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 195, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 196, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 196, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 197, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 197, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 198, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 198, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xticks", "line_number": 199, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 199, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 200, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 200, "usage_type": "name"}, {"api_name": "sklearn.cluster.KMeans", "line_number": 207, "usage_type": "call"}, {"api_name": "sklearn.cluster", "line_number": 207, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.scatter", "line_number": 210, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", 
"line_number": 210, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.xlabel", "line_number": 211, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 211, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.ylabel", "line_number": 212, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 212, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.title", "line_number": 213, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 213, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.savefig", "line_number": 214, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 214, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.grid", "line_number": 215, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 215, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 216, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 216, "usage_type": "name"}]} +{"seq_id": "22518199097", "text": "#!/usr/bin/env python\n# -*- coding:utf-8 -*-\nfrom typing import List\n\nclass Solution:\n def evaluate(self, s: str, knowledge: List[List[str]]) -> str:\n knowledge = {k: v for k, v in knowledge}\n key = \"\"\n for ch in s:\n if ch == \"(\":\n key = \"\"\n elif ch == \")\":\n v = knowledge.get(key, \"?\")\n s = s.replace(\"({})\".format(key), v)\n key = \"\"\n elif key:\n key += ch\n return s", "repo_name": "ftakanashi/JobProjects", "sub_path": "LeetCode/1807.替换字符串中的括号内容/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 505, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "77", "api": [{"api_name": "typing.List", "line_number": 6, "usage_type": "name"}]} +{"seq_id": "13267224658", "text": "#!/usr/bin/env python3\n\"\"\"\n This is python2 + python3 compatible and has no third\n party dependencies.\n\"\"\"\n\nimport sys\nimport time\nimport os\nimport pwd\nimport grp\nfrom hashlib import md5\nimport json\n\nfrom ipdb import set_trace\n\ntarget_dir = \"\"\noutput_file = \"\"\nblacklist_dirs = [\n \"/tmp/\",\n \"/proc/\",\n \"/run/\",\n \"/dev/\",\n \"/sys/\",\n \"/lost+found\",\n \"/usr/share/man/\",\n \"/usr/include/\",\n]\n\ncounter_files = counter_dirs = 0\n\nbig_data = {}\nerrors = []\n\n\ndef parse_args():\n global target_dir, output_file\n if len(sys.argv) != 3:\n print(\"ERROR: {} \".format(sys.argv[0]))\n sys.exit(1)\n output_file = sys.argv[1]\n target_dir = sys.argv[2]\n\n\ndef get_perm(path):\n global errors\n file_info = {}\n file_info['type'] = 'dir'\n try:\n s = os.stat(path)\n file_info['owner'] = pwd.getpwuid(s.st_uid).pw_name\n file_info['group'] = grp.getgrgid(s.st_gid).gr_name\n file_info['permissions'] = oct(s.st_mode)[-4:]\n except (KeyError, OSError) as e:\n msg = \"{}: {}\".format(path, e)\n errors.append(msg)\n return file_info\n\n\ndef process_file(f_abs):\n file_info = get_perm(f_abs)\n file_info['type'] = 'file' # get_perm sets fix dir. 
we overwrite here\n file_info['can_read'] = True\n try:\n with open(f_abs, \"rb\") as f:\n m = md5(f.read())\n file_info['md5'] = m.hexdigest()\n except Exception:\n file_info['can_read'] = False\n return file_info\n\n\ndef extract_files_and_dirs(target_dir):\n global counter_dirs, counter_files, big_data\n for dirpath, directories, filenames in os.walk(target_dir):\n\n # process all files\n for f in filenames:\n f_abs = os.path.join(dirpath, f)\n if is_blacklisted(f_abs):\n continue\n counter_files += 1\n print(\"FILE {}\".format(f_abs))\n file_info = process_file(f_abs)\n big_data[f_abs] = file_info\n\n # process all dirs\n for d in directories:\n d_abs = os.path.join(dirpath, d)\n if is_blacklisted(d_abs):\n continue\n counter_dirs += 1\n print(\"DIR {}\".format(d_abs))\n file_info = get_perm(d_abs)\n big_data[d_abs] = file_info\n print(\"\\nProcessed {} files and {} dirs from {}\".format(counter_files, \n counter_dirs,\n target_dir))\n\n\ndef is_blacklisted(d_abs):\n for blacklisted_dir in blacklist_dirs:\n if d_abs.startswith(blacklisted_dir):\n print(\"BLACKLIST {}\".format(d_abs))\n return True\n return False\n\n\ndef dump():\n with open(output_file, \"w\") as f:\n json.dump(big_data, f)\n print(\"Data written to {}\".format(output_file))\n\n\nif __name__ == '__main__':\n parse_args()\n start = time.time()\n extract_files_and_dirs(target_dir)\n dump()\n end = time.time()\n print(\"We needed {} seconds\".format(int(end-start)))\n if len(errors) > 0:\n print(\"\\nWe got the following errors:\")\n for error in errors:\n print(error)\n", "repo_name": "kmille/rusted-in-the-air", "sub_path": "lauf.py", "file_name": "lauf.py", "file_ext": "py", "file_size_in_byte": 3221, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "sys.argv", "line_number": 38, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 39, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 40, "usage_type": "call"}, {"api_name": "sys.argv", "line_number": 41, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 42, "usage_type": "attribute"}, {"api_name": "os.stat", "line_number": 50, "usage_type": "call"}, {"api_name": "pwd.getpwuid", "line_number": 51, "usage_type": "call"}, {"api_name": "grp.getgrgid", "line_number": 52, "usage_type": "call"}, {"api_name": "hashlib.md5", "line_number": 66, "usage_type": "call"}, {"api_name": "os.walk", "line_number": 75, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 79, "usage_type": "call"}, {"api_name": "os.path", "line_number": 79, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 89, "usage_type": "call"}, {"api_name": "os.path", "line_number": 89, "usage_type": "attribute"}, {"api_name": "json.dump", "line_number": 111, "usage_type": "call"}, {"api_name": "time.time", "line_number": 117, "usage_type": "call"}, {"api_name": "time.time", "line_number": 120, "usage_type": "call"}]} +{"seq_id": "29275175134", "text": "import os, sys, math, pygame, pygame.mixer, euclid, time\nfrom pygame.locals import *\nimport random\n\nblack = 0, 0, 0\nwhite = 255, 255, 255\nred = 255, 0, 0\ngreen = 0, 255, 0\nblue = 0, 0, 255\nyellow = 255, 255, 0\ntimediff = 1\ncolors = [white, green, blue, red, yellow]\nscreen = None\nscreen_size = None\nclock = pygame.time.Clock()\ncenter_circle = None\nimage = None\nrect = None\n\n\ndef initialize():\n global center_circle, Road1, Road2, screen, screen_size\n screen_size = screen_width, 
screen_height = 300, 300\n screen = pygame.display.set_mode(screen_size)\n clock = pygame.time.Clock()\n pygame.display.set_caption(\"Assignment 1\")\n screen.fill(white)\n\nclass Mycircle:\n def __init__(self, position, size, color = (255, 255, 255),velocity = euclid.Vector2(0, 0) , width = 0):\n self.position = position\n self.size = size\n self.color = color\n self.width = width\n self.velocity = velocity\n self.display()\n\n def display(self):\n rx, ry = int(self.position.x), int(self.position.y)\n pygame.draw.circle(screen, self.color, (rx, ry), self.size, self.width)\n\n def changeColor(self, color):\n self.color = color\n\n def move(self ,position):\n self.changeColor(white)\n self.display()\n self.position = position\n self.changeColor(green)\n self.display()\n\n def change_velocity(self, velocity):\n self.velocity = velocity\n\nclass environment:\n def __init__(self, center, radius, angularVelocity, initialAngle):\n C = Mycircle(center, 4, black)\n size = 3\n initialPosition = center + euclid.Vector2(math.cos(initialAngle), math.sin(initialAngle))\n self.agent = Mycircle(initialPosition, size, green, width = 3)\n time = 0\n flag = True\n dtime_ms = clock.tick(60)\n timediff = dtime_ms / 10000.0\n while flag:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n flag = False\n current = AgentFunction(radius, center, initialAngle, time, angularVelocity)\n self.agent.move(current)\n time -= timediff\n pygame.display.flip()\n\n\ndef check(pos1, pos2):\n if pos1.x <= pos2.x+1 and pos1.x > pos2.x-1 and pos1.y <= pos2.y+1 and pos1.y > pos2.y-1:\n return True\n\ndef AgentFunction(radius, center, initialAngle, time, angularVelocity):\n Angle = initialAngle + angularVelocity*time\n position = center + radius * euclid.Vector2(math.cos(Angle), math.sin(Angle))\n return position\n\ndef main():\n initialize()\n env = environment(euclid.Vector2(150, 150), 100, .01 ,0)\n pygame.quit()\n\nmain()", "repo_name": "vampcoder/Artificial-Life-Simulation-Lab", "sub_path": "Lab 1/2.py", "file_name": "2.py", "file_ext": "py", "file_size_in_byte": 2679, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "pygame.time.Clock", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 15, "usage_type": "attribute"}, {"api_name": "pygame.display.set_mode", "line_number": 24, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 24, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 25, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 25, "usage_type": "attribute"}, {"api_name": "pygame.display.set_caption", "line_number": 26, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 26, "usage_type": "attribute"}, {"api_name": "euclid.Vector2", "line_number": 30, "usage_type": "call"}, {"api_name": "pygame.draw.circle", "line_number": 40, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 40, "usage_type": "attribute"}, {"api_name": "euclid.Vector2", "line_number": 59, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 59, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 59, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 66, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 66, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 67, "usage_type": "attribute"}, {"api_name": "pygame.display.flip", "line_number": 72, 
"usage_type": "call"}, {"api_name": "pygame.display", "line_number": 72, "usage_type": "attribute"}, {"api_name": "euclid.Vector2", "line_number": 81, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 81, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 81, "usage_type": "call"}, {"api_name": "euclid.Vector2", "line_number": 86, "usage_type": "call"}, {"api_name": "pygame.quit", "line_number": 87, "usage_type": "call"}]} +{"seq_id": "17257463006", "text": "import networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\n\n\ndef latlon_to_xy(lat, lon):\n \"\"\"Convert angluar to cartesian coordiantes\n\n latitude is the 90deg - zenith angle in range [-90;90]\n lonitude is the azimuthal angle in range [-180;180]\n \"\"\"\n r = 6371 # https://en.wikipedia.org/wiki/Earth_radius\n theta = math.pi / 2 - math.radians(float(lat))\n phi = math.radians(float(lon))\n x = r * math.sin(theta) * math.cos(phi) # bronstein (3.381a)\n y = r * math.sin(theta) * math.sin(phi)\n return [x, y]\n\n\nG = nx.MultiGraph()\n\ngraph_data_nodes = np.loadtxt('data/reuse/nodes.csv', dtype='str', delimiter=',', encoding=\"utf-8-sig\", skiprows=1)\ngraph_data_edges = np.loadtxt('data/reuse/edges.csv', dtype='str', delimiter=',', encoding=\"utf-8-sig\", skiprows=1)\npath_edges = []\ntry:\n path_edges = np.loadtxt('data/reuse/path_edges.csv', dtype='str', delimiter=',', encoding=\"utf-8-sig\", skiprows=1)\nexcept:\n pass\n\nfor node in graph_data_nodes:\n x, y = latlon_to_xy((node[0]), (node[1]))\n G.add_node(node[4], pos=(y, -x))\n\nG.add_edges_from(graph_data_edges)\n\npos = nx.get_node_attributes(G, 'pos')\n\nbusNodes = []\nmetroNodes = []\nbusEdges = []\nmetroEdges = []\nwalkNodes = []\nwalkEdges = []\npathEdges = []\n\npathEdgeExist = set()\n\nfor edge in path_edges:\n pathEdgeExist.add(edge[0] + \"::\" + edge[1])\n\nfor node in G.nodes:\n if 'bus' in node:\n busNodes.append(node)\n elif 'metro' in node:\n metroNodes.append(node)\n elif 'walk' in node:\n walkNodes.append(node)\n\nfor node in G.edges:\n if (node[0] + \"::\" + node[1] in pathEdgeExist or node[1] + \"::\" + node[0] in pathEdgeExist):\n pathEdges.append(node)\n elif ('bus' in node[0] and 'bus' in node[1]):\n busEdges.append(node)\n elif ('metro' in node[0] and 'metro' in node[1]):\n metroEdges.append(node)\n elif ('walk' in node[0] and 'walk' in node[1]):\n walkEdges.append(node)\n\n# Draw Bus Nodes\ntry:\n nx.draw_networkx_nodes(G, pos, nodelist=busNodes, node_size=15, node_color='tab:blue', alpha=0.5)\nexcept nx.NetworkXError:\n print('Error drawing bus nodes')\n\n# Draw Metro Nodes\ntry:\n nx.draw_networkx_nodes(G, pos, nodelist=metroNodes, node_size=25, node_color='tab:red', alpha=0.5)\nexcept nx.NetworkXError:\n print('Error drawing metro nodes')\n\n# Draw Walk Nodes\n# try: nx.draw_networkx_nodes(G, pos, nodelist=walkNodes, node_size=5, node_color='tab:green', alpha=0.9)\n# except nx.NetworkXError:\n# print('Error drawing walk nodes')\n\n# Draw Bus Edges\ntry:\n nx.draw_networkx_edges(G, pos, edgelist=busEdges, edge_color='tab:blue', alpha=0.2)\nexcept nx.NetworkXError:\n print('Error drawing bus edges')\n\n# Draw Metro Edges\ntry:\n nx.draw_networkx_edges(G, pos, edgelist=metroEdges, edge_color='tab:red', alpha=0.2)\nexcept nx.NetworkXError:\n print('Error drawing metro edges')\n\n# Draw Walk Edges\ntry:\n nx.draw_networkx_edges(G, pos, edgelist=walkEdges, edge_color='tab:green', alpha=0.2)\nexcept nx.NetworkXError:\n print('Error drawing walk edges')\n\n# Draw Path Edges\ntry:\n 
nx.draw_networkx_edges(G, pos, edgelist=pathEdges, edge_color='black', width=5)\nexcept nx.NetworkXError:\n print('Error drawing path edges')\n\n\n# Draw Labels\t\n# nx.draw_networkx_labels(G, pos, font_size=10)\n\ndef maximize():\n plot_backend = plt.get_backend()\n mng = plt.get_current_fig_manager()\n if plot_backend == 'TkAgg':\n mng.resize(*mng.window.maxsize())\n elif plot_backend == 'wxAgg':\n mng.frame.Maximize(True)\n elif plot_backend == 'Qt4Agg':\n mng.window.showMaximized()\n\n\nplt.axis('equal')\nmaximize()\nplt.show()\n", "repo_name": "rocas777/RouteFInder", "sub_path": "networkx/view.py", "file_name": "view.py", "file_ext": "py", "file_size_in_byte": 3632, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 5, "dataset": "github-code", "pt": "77", "api": [{"api_name": "math.pi", "line_number": 14, "usage_type": "attribute"}, {"api_name": "math.radians", "line_number": 14, "usage_type": "call"}, {"api_name": "math.radians", "line_number": 15, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 16, "usage_type": "call"}, {"api_name": "math.cos", "line_number": 16, "usage_type": "call"}, {"api_name": "math.sin", "line_number": 17, "usage_type": "call"}, {"api_name": "networkx.MultiGraph", "line_number": 21, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 23, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 24, "usage_type": "call"}, {"api_name": "numpy.loadtxt", "line_number": 27, "usage_type": "call"}, {"api_name": "networkx.get_node_attributes", "line_number": 37, "usage_type": "call"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 72, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 73, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_nodes", "line_number": 78, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 79, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 89, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 90, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 95, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 96, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 101, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 102, "usage_type": "attribute"}, {"api_name": "networkx.draw_networkx_edges", "line_number": 107, "usage_type": "call"}, {"api_name": "networkx.NetworkXError", "line_number": 108, "usage_type": "attribute"}, {"api_name": "matplotlib.pyplot.get_backend", "line_number": 116, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 116, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.get_current_fig_manager", "line_number": 117, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 117, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.axis", "line_number": 126, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 126, "usage_type": "name"}, {"api_name": "matplotlib.pyplot.show", "line_number": 128, "usage_type": "call"}, {"api_name": "matplotlib.pyplot", "line_number": 128, "usage_type": "name"}]} +{"seq_id": "13021856190", "text": "import pytest\n\nfrom company_name import CompanyNameAgent, Reason, Result, Solution\n\n\n@pytest.mark.parametrize(\n (\"first\", \"second\"),\n (\n ((), ()),\n ((), (\"COMPANY NAME\",)),\n ((\"COMPANY NAME\",), ()),\n 
((\"\",), (\"\",)),\n ((None,), (None,)),\n ((\"\",), (None,)),\n ),\n)\ndef test_when_no_names(first, second):\n print(first, second)\n result = CompanyNameAgent().resolve(first, second)\n print(result)\n assert result == Result(Solution.NO_DATA, Reason())\n\n\n@pytest.mark.parametrize(\n (\"first\", \"second\", \"expected_solution\"),\n (\n ((\"Google limited liability company\",), (\"GOOGLE LLC\",), Solution.MATCH),\n ((\"Google\",), (\"Facebook\",), Solution.NO_MATCH),\n ),\n)\ndef test_when_one_name(first, second, expected_solution):\n print(first, second)\n result = CompanyNameAgent().resolve(first, second)\n assert result.solution == expected_solution\n\n\n@pytest.mark.parametrize(\n (\"first\", \"second\"),\n (\n ((\"GOOGLE LLC\",), (\"GOOGLE LLC\",)),\n ((\"GOOGLE LLC\",), (\"GOOGLE LLC\", \"FACEBOOK\", \"AMAZON.COM\")),\n (\n (\n \"AGRICULTURAL BANK OF CHINA\",\n \"POWERCHINA\",\n ),\n (\"AGRICULTURAL BANK OF CHINA\",),\n ),\n ),\n)\ndef test_choose_exact_name(first, second):\n print(first, second)\n result = CompanyNameAgent().resolve(first, second)\n assert result.solution == Solution.MATCH\n assert (\n result.reason.results[0].alerted_party_name == result.reason.results[0].watchlist_party_name\n )\n\n\n@pytest.mark.parametrize((\"first\", \"second\"), (((\"THE VTB BANK\",), (\"SAFE NAME\",)),))\ndef test_blacklist(first, second):\n print(first, second)\n assert CompanyNameAgent().resolve(first, second).solution == Solution.MATCH\n\n\n@pytest.mark.parametrize(\n (\"first\", \"second\"),\n (\n ((\"THE NATIONAL COMMERCIAL BANK\",), (\"NCB\",)),\n (\n (\"THE NATIONAL COMMERCIAL BANK\",),\n (\"NCB\", \"BANK OF CHINA\", \"NATIONAL TREASURY\"),\n ),\n ),\n)\ndef test_abbreviation(first, second):\n print(first, second)\n result = CompanyNameAgent().resolve(first, second)\n assert result.solution == Solution.MATCH\n\n\n@pytest.mark.parametrize(\n (\"first\", \"second\", \"abbreviation_not_chosen\"),\n (\n (\n (\"THE NATIONAL COMMERCIAL BANK\",),\n (\"NCB\", \"THE NATIONAL COMMERCIAL BANK\"),\n \"NCB\",\n ),\n ),\n)\ndef test_choose_exact_name_over_abbreviation(first, second, abbreviation_not_chosen):\n print(first, second)\n result = CompanyNameAgent()._resolve(first, second)\n assert result.solution == Solution.MATCH\n assert abbreviation_not_chosen != result.reason.results[0].watchlist_party_name\n\n\n@pytest.mark.parametrize(\n \"first, second\",\n [\n ((\"OSANG HEAL THCARE CO LTD INFOPIA\",), (\"INFOPIA CO LTD\",)),\n ((\"SCCM DYBNG AND PRINTING CO. 
ATO\",), (\"ATO, OOO\",)),\n ],\n)\ndef test_match_when_base_name_after_legal_take_it_from_other(first, second):\n print(first, second)\n result = CompanyNameAgent().resolve(first, second)\n assert result.solution == Solution.MATCH\n", "repo_name": "ngraczykowski/iris-root", "sub_path": "modules/organization-name-agent/tests/agent/test_company_name_agent.py", "file_name": "test_company_name_agent.py", "file_ext": "py", "file_size_in_byte": 3131, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "company_name.CompanyNameAgent", "line_number": 19, "usage_type": "call"}, {"api_name": "company_name.Result", "line_number": 21, "usage_type": "call"}, {"api_name": "company_name.Solution.NO_DATA", "line_number": 21, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 21, "usage_type": "name"}, {"api_name": "company_name.Reason", "line_number": 21, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 6, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 6, "usage_type": "attribute"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 33, "usage_type": "call"}, {"api_name": "pytest.mark.parametrize", "line_number": 24, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 24, "usage_type": "attribute"}, {"api_name": "company_name.Solution.MATCH", "line_number": 27, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 27, "usage_type": "name"}, {"api_name": "company_name.Solution.NO_MATCH", "line_number": 28, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 28, "usage_type": "name"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 53, "usage_type": "call"}, {"api_name": "company_name.Solution.MATCH", "line_number": 54, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 54, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 37, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 37, "usage_type": "attribute"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 63, "usage_type": "call"}, {"api_name": "company_name.Solution.MATCH", "line_number": 63, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 63, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 60, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 60, "usage_type": "attribute"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 78, "usage_type": "call"}, {"api_name": "company_name.Solution.MATCH", "line_number": 79, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 79, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 66, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 66, "usage_type": "attribute"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 94, "usage_type": "call"}, {"api_name": "company_name.Solution.MATCH", "line_number": 95, "usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 95, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 82, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 82, "usage_type": "attribute"}, {"api_name": "company_name.CompanyNameAgent", "line_number": 108, "usage_type": "call"}, {"api_name": "company_name.Solution.MATCH", "line_number": 109, 
"usage_type": "attribute"}, {"api_name": "company_name.Solution", "line_number": 109, "usage_type": "name"}, {"api_name": "pytest.mark.parametrize", "line_number": 99, "usage_type": "call"}, {"api_name": "pytest.mark", "line_number": 99, "usage_type": "attribute"}]} +{"seq_id": "26208507944", "text": "from diablo import std_commit\nfrom diablo.jobs.tasks.queued_emails_task import QueuedEmailsTask\nfrom diablo.lib.util import utc_now\nfrom diablo.models.course_preference import CoursePreference\nfrom diablo.models.queued_email import QueuedEmail\nfrom diablo.models.sent_email import SentEmail\nfrom diablo.models.sis_section import SisSection\nfrom flask import current_app as app\n\n\nclass TestQueuedEmailsTask:\n\n def test_no_email_queued(self):\n \"\"\"Do nothing if 'queued_emails' table is empty.\"\"\"\n term_id = app.config['CURRENT_TERM_ID']\n QueuedEmailsTask().run()\n std_commit(allow_test_environment=True)\n # Verify that the next job run will have zero queued emails.\n assert len(QueuedEmail.get_all(term_id=term_id)) == 0\n\n QueuedEmailsTask().run()\n std_commit(allow_test_environment=True)\n # If we reach this point then no error occurred.\n\n def test_send_invitation_emails(self):\n \"\"\"Send all email in 'queued_emails' table.\"\"\"\n term_id = app.config['CURRENT_TERM_ID']\n courses = SisSection.get_courses(section_ids=[50000, 50001], term_id=term_id)\n email_template_type = 'invitation'\n\n for course in courses:\n for instructor in course['instructors']:\n QueuedEmail.create(course['sectionId'], email_template_type, term_id, recipient=instructor)\n std_commit(allow_test_environment=True)\n\n def _get_emails_to_courses():\n emails_sent = []\n for c in courses:\n emails_sent.extend(\n _get_emails_sent(\n email_template_type=email_template_type,\n section_id=c['sectionId'],\n term_id=term_id,\n ),\n )\n return emails_sent\n\n before = utc_now()\n emails_sent_before = _get_emails_to_courses()\n # Run the job\n QueuedEmailsTask().run()\n std_commit(allow_test_environment=True)\n\n # Expect one email per instructor\n emails_sent_after = _get_emails_to_courses()\n assert len(emails_sent_after) == len(emails_sent_before) + 3\n\n def _find_email(section_id, uid):\n return next((e for e in emails_sent_after if e.section_id == section_id and e.sent_at > before and uid == e.recipient_uid), None)\n\n for course in courses:\n for instructor in course['instructors']:\n sent_email = _find_email(section_id=course['sectionId'], uid=instructor['uid'])\n assert sent_email\n email_json = sent_email.to_api_json()\n assert email_json['recipientUid'] == instructor['uid']\n assert email_json['sectionId'] == course['sectionId']\n assert email_json['templateType'] == email_template_type\n assert email_json['termId'] == term_id\n assert email_json['sentAt']\n\n def test_course_has_opted_out(self):\n \"\"\"Do not send email to courses that have opted out.\"\"\"\n def _emails_sent():\n return _get_emails_sent(email_template_type=email_template_type, section_id=section_id, term_id=term_id)\n\n term_id = app.config['CURRENT_TERM_ID']\n section_id = 50000\n CoursePreference.update_opt_out(term_id=term_id, section_id=section_id, opt_out=True)\n email_template_type = 'invitation'\n recipient = {\n 'name': 'William Peter Blatty',\n 'uid': '10001',\n }\n QueuedEmail.create(section_id, email_template_type, term_id, recipient=recipient)\n std_commit(allow_test_environment=True)\n\n emails_sent_before = _emails_sent()\n # Run the job\n QueuedEmailsTask().run()\n std_commit(allow_test_environment=True)\n\n # 
Expect no emails sent\n emails_sent_after = _emails_sent()\n assert len(emails_sent_after) == len(emails_sent_before)\n assert list(map(lambda e: e.id, emails_sent_before)) == list(map(lambda e: e.id, emails_sent_after))\n\n def test_queued_email_for_admin(self):\n \"\"\"Certain email template types are for admin recipients only.\"\"\"\n def _emails_sent():\n return _get_emails_sent(email_template_type=email_template_type, section_id=section_id, term_id=term_id)\n\n term_id = app.config['CURRENT_TERM_ID']\n section_id = 50005\n email_template_type = 'admin_alert_room_change'\n recipient_uid = app.config['EMAIL_DIABLO_ADMIN_UID']\n QueuedEmail.create(\n section_id,\n email_template_type,\n term_id,\n recipient={\n 'name': 'Course Capture Admin',\n 'uid': recipient_uid,\n },\n )\n std_commit(allow_test_environment=True)\n\n before = utc_now()\n emails_sent_before = _emails_sent()\n # Run the job\n QueuedEmailsTask().run()\n std_commit(allow_test_environment=True)\n\n # Expect email to admin email address\n emails_sent_after = _emails_sent()\n assert len(emails_sent_after) == len(emails_sent_before) + 1\n\n sent_email = next((e for e in emails_sent_after if e.section_id == section_id and e.sent_at > before), None)\n assert sent_email\n email_json = sent_email.to_api_json()\n assert email_json['recipientUid'] == recipient_uid\n assert email_json['sectionId'] == section_id\n assert email_json['templateType'] == email_template_type\n assert email_json['termId'] == term_id\n assert email_json['sentAt']\n\n\ndef _get_emails_sent(email_template_type, section_id, term_id):\n return SentEmail.get_emails_of_type(\n section_ids=[section_id],\n template_type=email_template_type,\n term_id=term_id,\n )\n", "repo_name": "ets-berkeley-edu/diablo", "sub_path": "tests/test_tasks/test_queued_emails_task.py", "file_name": "test_queued_emails_task.py", "file_ext": "py", "file_size_in_byte": 5790, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 2, "dataset": "github-code", "pt": "77", "api": [{"api_name": "flask.current_app.config", "line_number": 15, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 15, "usage_type": "name"}, {"api_name": "diablo.jobs.tasks.queued_emails_task.QueuedEmailsTask", "line_number": 16, "usage_type": "call"}, {"api_name": "diablo.std_commit", "line_number": 17, "usage_type": "call"}, {"api_name": "diablo.models.queued_email.QueuedEmail.get_all", "line_number": 19, "usage_type": "call"}, {"api_name": "diablo.models.queued_email.QueuedEmail", "line_number": 19, "usage_type": "name"}, {"api_name": "diablo.jobs.tasks.queued_emails_task.QueuedEmailsTask", "line_number": 21, "usage_type": "call"}, {"api_name": "diablo.std_commit", "line_number": 22, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 27, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 27, "usage_type": "name"}, {"api_name": "diablo.models.sis_section.SisSection.get_courses", "line_number": 28, "usage_type": "call"}, {"api_name": "diablo.models.sis_section.SisSection", "line_number": 28, "usage_type": "name"}, {"api_name": "diablo.models.queued_email.QueuedEmail.create", "line_number": 33, "usage_type": "call"}, {"api_name": "diablo.models.queued_email.QueuedEmail", "line_number": 33, "usage_type": "name"}, {"api_name": "diablo.std_commit", "line_number": 34, "usage_type": "call"}, {"api_name": "diablo.lib.util.utc_now", "line_number": 48, "usage_type": "call"}, {"api_name": 
"diablo.jobs.tasks.queued_emails_task.QueuedEmailsTask", "line_number": 51, "usage_type": "call"}, {"api_name": "diablo.std_commit", "line_number": 52, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 77, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 77, "usage_type": "name"}, {"api_name": "diablo.models.course_preference.CoursePreference.update_opt_out", "line_number": 79, "usage_type": "call"}, {"api_name": "diablo.models.course_preference.CoursePreference", "line_number": 79, "usage_type": "name"}, {"api_name": "diablo.models.queued_email.QueuedEmail.create", "line_number": 85, "usage_type": "call"}, {"api_name": "diablo.models.queued_email.QueuedEmail", "line_number": 85, "usage_type": "name"}, {"api_name": "diablo.std_commit", "line_number": 86, "usage_type": "call"}, {"api_name": "diablo.jobs.tasks.queued_emails_task.QueuedEmailsTask", "line_number": 90, "usage_type": "call"}, {"api_name": "diablo.std_commit", "line_number": 91, "usage_type": "call"}, {"api_name": "flask.current_app.config", "line_number": 103, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 103, "usage_type": "name"}, {"api_name": "flask.current_app.config", "line_number": 106, "usage_type": "attribute"}, {"api_name": "flask.current_app", "line_number": 106, "usage_type": "name"}, {"api_name": "diablo.models.queued_email.QueuedEmail.create", "line_number": 107, "usage_type": "call"}, {"api_name": "diablo.models.queued_email.QueuedEmail", "line_number": 107, "usage_type": "name"}, {"api_name": "diablo.std_commit", "line_number": 116, "usage_type": "call"}, {"api_name": "diablo.lib.util.utc_now", "line_number": 118, "usage_type": "call"}, {"api_name": "diablo.jobs.tasks.queued_emails_task.QueuedEmailsTask", "line_number": 121, "usage_type": "call"}, {"api_name": "diablo.std_commit", "line_number": 122, "usage_type": "call"}, {"api_name": "diablo.models.sent_email.SentEmail.get_emails_of_type", "line_number": 139, "usage_type": "call"}, {"api_name": "diablo.models.sent_email.SentEmail", "line_number": 139, "usage_type": "name"}]} +{"seq_id": "3522239538", "text": "import matplotlib.pyplot as plt\n#from matplotlib.axis import Ticker\nimport streamlit as st \nimport requests \nimport simfin as sf\nimport pandas as pd\nimport numpy as np\nfrom simfin.names import *\n\nst.set_page_config(layout=\"wide\")\n\nsf.set_data_dir('~/simfin_data/')\n\nsf.set_api_key(api_key=\"enter API key\")\n\nsymbol = st.sidebar.text_input (\"Symbol\", value = \"MSFT\")\n\nscreen = st.sidebar.selectbox (\"View\", (\"Company info\", \"Income statement\", \"Balance sheet\", \"Cash flow statement\"))\n\n#st.title(symbol + ' ' + screen)\n\n#if screen == \"Company info\":\n #col1, col2, col3 = st.columns([3,4,3])\n #with col2:\n #st.title(symbol + ' ' + screen)\n\n #col1, col2 = st.columns ([1,2])\n #with col1:\n\n #df_company = sf.load_companies(market='us', index= ['Ticker'])\n\n #df_shareprices = sf.load_shareprices(variant='daily', market='us', index= ['Ticker', DATE])\n\n #st.markdown(\"

Info

\", unsafe_allow_html=True)\n\n #st.subheader ('Company name')\n #df_company.loc [symbol] [COMPANY_NAME]\n\n #df_price_ratios = sf.load_derived_shareprices(variant='daily', market='us', index= ['Ticker', DATE])\n\n #st.subheader ('Market-Cap')\n #df_banaan = df_price_ratios.loc [symbol] [MCAP] \n #df_banaan.loc ['2022-03-14']\n\n #st.subheader ('P/E Ratio')\n #df_pe_today = df_price_ratios.loc [symbol] [PE_QUARTERLY]\n #df_pe_today.loc ['2022-03-14']\n\n\n #with col2:\n\n #df_shareprices = df_shareprices.loc [symbol]\n\n #st.markdown(\"

Share price

\", unsafe_allow_html=True)\n\n # st.line_chart(df_shareprices [SHARE_PRICE_CLOSE])\n\n\n #st.markdown(\"

Some title

\", unsafe_allow_html=True)\n\nif screen == \"Income statement\":\n col1, col2, col3 = st.columns([3,4,3])\n with col2:\n st.title(symbol + ' ' + screen)\n df_income = sf.load_income(variant='annual', market='us', index=['Ticker', FISCAL_YEAR])\n df_income2 = df_income.loc [symbol][[REVENUE, COST_REVENUE, GROSS_PROFIT, OPERATING_EXPENSES, SELLING_GEN_ADMIN\n,RESEARCH_DEV\n,DEPR_AMOR, OP_INCOME\n,NON_OP_INCOME\n,INTEREST_EXP_NET\n, PRETAX_INCOME_LOSS_ADJ\n,ABNORM_GAIN_LOSS,PRETAX_INCOME_LOSS, \nINCOME_TAX,INCOME_CONT_OP, NET_EXTR_GAIN_LOSS\n, NET_INCOME]]\n\n df_tr_income = df_income2.transpose()\n\n df_income_done = df_tr_income[df_tr_income.columns[::-1]]\n\n df_income_done\n\n col1, col2, col3 = st.columns([3,2,7])\n\n with col1:\n st.markdown(\"

Select

\", unsafe_allow_html=True)\n\n select = st.multiselect ( 'Type something', ['Revenue', 'Gross profit', 'Net income'], ['Revenue'])\n\n with col2:\n pass\n\n\n with col3:\n if select == ['Revenue']:\n st.markdown(\"

Revenue

\", unsafe_allow_html=True)\n st.bar_chart(df_income_done.loc [REVENUE])\n\n if select == ['Gross profit']:\n st.markdown(\"

Gross profit

\", unsafe_allow_html=True)\n st.bar_chart(df_income_done.loc [GROSS_PROFIT])\n\n if select == ['Net income']:\n st.markdown(\"

Net income

\", unsafe_allow_html=True)\n st.bar_chart(df_income_done.loc [NET_INCOME])\n\n if select == ['Revenue', 'Net income']:\n st.markdown(\"

Revenue

\", unsafe_allow_html=True)\n st.bar_chart(df_income_done.loc [REVENUE])\n st.markdown(\"

Net income

\", unsafe_allow_html=True)\n st.bar_chart(df_income_done.loc [NET_INCOME])\n\n\n\nif screen == \"Balance sheet\":\n col1, col2, col3 = st.columns([3,4,3])\n with col2:\n st.title(symbol + ' ' + screen)\n df_balance = sf.load_balance(variant='annual', market='us', index=['Ticker', 'Fiscal Year']) \n df_balance = df_balance.loc [symbol] [[CASH_EQUIV_ST_INVEST\n, ACC_NOTES_RECV,INVENTORIES, TOTAL_CUR_ASSETS\n, PROP_PLANT_EQUIP_NET, LT_INVEST_RECV, OTHER_LT_ASSETS, TOTAL_NONCUR_ASSETS, TOTAL_ASSETS, PAYABLES_ACCRUALS\n, ST_DEBT, TOTAL_CUR_LIAB\n, LT_DEBT, TOTAL_NONCUR_LIAB, TOTAL_LIABILITIES, SHARE_CAPITAL_ADD, TREASURY_STOCK, RETAINED_EARNINGS, TOTAL_EQUITY, TOTAL_LIAB_EQUITY]]\n\n df_tr_balance = df_balance.transpose()\n\n df_tr_balance[df_tr_balance.columns[::-1]]\n\n col1, col2, col3 = st.columns([3,2,7])\n with col1:\n st.markdown(\"

Select

\", unsafe_allow_html=True)\n\n select_balance = st.selectbox (\"Select\", (\"Total assets\", \"Total liabilities\", \"Shareholders equity\"))\n\n with col2:\n pass\n\n\n with col3:\n if select_balance == 'Total assets':\n st.markdown(\"

Total assets

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_balance.loc [TOTAL_ASSETS])\n\n if select_balance == 'Total liabilities':\n st.markdown(\"

Total liabilities

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_balance.loc [TOTAL_LIABILITIES])\n\n if select_balance == 'Shareholders equity':\n st.markdown(\"

Shareholders equity

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_balance.loc [TOTAL_EQUITY])\n\n\n\n\nif screen == \"Cash flow statement\":\n col1, col2, col3 = st.columns([3,4,3])\n with col2:\n st.title(symbol + ' ' + screen)\n df_cashflow = sf.load_cashflow(variant='annual', market='us', index= [ 'Ticker', 'Fiscal Year'])\n df_cashflow = df_cashflow.loc [symbol] [[NET_INCOME_START, DEPR_AMOR, NON_CASH_ITEMS, CHG_WORKING_CAPITAL, CHG_ACCOUNTS_RECV, CHG_INVENTORIES, CHG_ACC_PAYABLE, CHG_OTHER, NET_CASH_OPS, CHG_FIX_ASSETS_INT, NET_CHG_LT_INVEST, NET_CASH_ACQ_DIVEST, NET_CASH_INV, DIVIDENDS_PAID, CASH_REPAY_DEBT, CASH_REPURCHASE_EQUITY, NET_CASH_FIN, NET_CHG_CASH]]\n\n df_tr_cashflow = df_cashflow.transpose ()\n\n df_tr_cashflow[df_tr_cashflow.columns[::-1]]\n\n col1, col2, col3 = st.columns([3,2,7])\n with col1:\n st.markdown(\"

Select

\", unsafe_allow_html=True)\n\n select_cashflow = st.selectbox (\"Select\", (\"Net change in cash\", \"Net cash from operating activities\", \"Net income\"))\n\n with col2:\n pass\n\n\n with col3:\n if select_cashflow == 'Net change in cash':\n st.markdown(\"

Net change in cash

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_cashflow.loc [NET_CHG_CASH])\n\n if select_cashflow == 'Net cash from operating activities':\n st.markdown(\"

Net cash from operating activitiess

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_cashflow.loc [NET_CASH_OPS])\n\n if select_cashflow == 'Net income':\n st.markdown(\"

Net income

\", unsafe_allow_html=True)\n st.bar_chart(df_tr_cashflow.loc [NET_INCOME_START])\n", "repo_name": "valueinvestorr/us-stock-data", "sub_path": "dashboard.py", "file_name": "dashboard.py", "file_ext": "py", "file_size_in_byte": 6917, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "streamlit.set_page_config", "line_number": 10, "usage_type": "call"}, {"api_name": "simfin.set_data_dir", "line_number": 12, "usage_type": "call"}, {"api_name": "simfin.set_api_key", "line_number": 14, "usage_type": "call"}, {"api_name": "streamlit.sidebar.text_input", "line_number": 16, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 16, "usage_type": "attribute"}, {"api_name": "streamlit.sidebar.selectbox", "line_number": 18, "usage_type": "call"}, {"api_name": "streamlit.sidebar", "line_number": 18, "usage_type": "attribute"}, {"api_name": "streamlit.columns", "line_number": 62, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 64, "usage_type": "call"}, {"api_name": "simfin.load_income", "line_number": 65, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 82, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 85, "usage_type": "call"}, {"api_name": "streamlit.multiselect", "line_number": 87, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 95, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 96, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 99, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 100, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 103, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 104, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 107, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 108, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 109, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 110, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 115, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 117, "usage_type": "call"}, {"api_name": "simfin.load_balance", "line_number": 118, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 129, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 131, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 133, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 141, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 142, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 145, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 146, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 149, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 150, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 156, "usage_type": "call"}, {"api_name": "streamlit.title", "line_number": 158, "usage_type": "call"}, {"api_name": "simfin.load_cashflow", "line_number": 159, "usage_type": "call"}, {"api_name": "streamlit.columns", "line_number": 166, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 168, "usage_type": "call"}, {"api_name": "streamlit.selectbox", "line_number": 170, 
"usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 178, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 179, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 182, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 183, "usage_type": "call"}, {"api_name": "streamlit.markdown", "line_number": 186, "usage_type": "call"}, {"api_name": "streamlit.bar_chart", "line_number": 187, "usage_type": "call"}]} +{"seq_id": "33291380219", "text": "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 8 09:55:38 2018\n\n@author: smh\n\"\"\"\n\nimport os\nimport subprocess \nfrom datetime import datetime\nimport LandscapeModel\n\nclass RunFactory(object):\n \n def __init__(self,fdir,fname):\n \"\"\"\n fdir (string): path of project folder\n \"\"\"\n# self.message(\"create runfactory: \" + fdir)\n self.fdir = fdir\n self.fname = fname\n self.runlist = LandscapeModel.utils.ParameterList(self.fdir,self.fname,\",\")\n self.runs = []\n\n def setup(self,key=\"\",printMessage=False,createDatabase=True,**kwargs):\n \n # get list with model runs\n if key != \"\" and key!=\"None\":\n runs = [i for i in self.runlist if i.key == key and i.simulate == True]\n else:\n runs = [i for i in self.runlist if i.simulate == True] \n \n if len(runs)<1:\n raise ValueError(\"No valid run ID found\")\n \n for run in runs: \n if printMessage: self.message(\"# Create catchment\") \n self.runs.append(LandscapeModel.Catchment(run,printMessage=printMessage,\n createDatabase=createDatabase))\n \n def __call__(self,key=\"\",printMessage=False,plotMap=False,**kwargs): \n \"\"\"\n Make a single model run with 'name' or comptue all runs listed in\n run List.\n \"\"\"\n \n # get list with model runs\n if key != \"\" and key!=\"None\":\n runs = [i for i in self.runlist if i.key == key and i.simulation == True]\n else:\n runs = [i for i in self.runlist if i.simulation == True] \n \n if len(runs)<1:\n \n self.message(\"simulation: no run to simulate\")\n \n else:\n \n for run in runs: \n \n if printMessage: self.message(\"simulation: create catchment\") \n print(\"project: \",run.key)\n self.runs.append(LandscapeModel.Catchment(run,printMessage=printMessage))\n \n if plotMap:\n plot=LandscapeModel.utils.Plotting()\n catchment = self.runs[-1]\n plot.CatchmentMap(catchment,withnames=True,\n fpath=os.path.join(catchment.fpath,\"Map.png\"))\n if run.catchment_separation != True:\n print(\"simulation: run\",self.runs[-1].modelrun.key)\n self.runs[-1](**kwargs)\n \n else:\n print(\"simulation: separate\",self.runs[-1].modelrun.key) \n catchSep = LandscapeModel.utils.CatchmentSeparator(self.runs[-1])\n catchSep()\n catchSep.plotMap(fontsize=8,withnames=True)\n print(\"simulation: run\",self.runs[-1].modelrun.key) \n catchSep.run_SolverUnits()\n\n def preprocessing(self,key,\n make_catchcon=False,\n has_scenarios=False):\n \"\"\"\n \"\"\"\n # select specific key if needed\n if key != \"\" and key!=\"None\":\n runs = [i for i in self.runlist if i.key == key and i.preprocessing == True]\n else:\n runs = [i for i in self.runlist if i.preprocessing == True]\n \n # conduct pre-processing\n for run in runs:\n self.__preprocessing(run,make_catchcon,has_scenarios)\n \n def __preprocessing(self,run,\n make_catchcon=False,\n has_scenarios=False):\n \"\"\"\n \"\"\"\n key = run.key\n fpath_project = os.path.join(run.fpath,run.key)\n ###########################################################################\n # pre-processing\n \n if make_catchcon:\n # create connections 
between cells<>cells and cells<>reches\n catchcon = LandscapeModel.utils.PreProcessing.CatchmentConnector(\n fpath_project,\n simplify_connections=4,\n connection_type=\"RO_GW\")\n # plot results of connections\n catchcon.makePlot(os.path.join(fpath_project,\"flow_network_voroni.png\"),resX=100,resY=100,plotVoroni=True,\n plotElevation=False,plot_simplified=True,fontsize=4,markersize=0.5)\n catchcon.makePlot(os.path.join(fpath_project,\"flow_network.png\"),resX=100,resY=100,plotVoroni=False,\n plotElevation=True,plot_simplified=True,fontsize=4,markersize=0.5)\n \n # calculate area-weighted flow timeseries of reach each and create files\n if run.runtype == \"inStream\":\n ayc = LandscapeModel.utils.PreProcessing.AreaYieldCatchment(\n run.fpath,\n key,\n frunlist=self.fname,\n filetype = run.database,\n time_format=\"%Y-%m-%dT%H:%M\")\n \n data_resampled=ayc.create_timeseries(resample_rule=\"1H\",\n resample_type=\"interpolate\")\n \n if has_scenarios:\n # create scenarios (365 days dry 10%-percentile, medium 50%-percentile and \n # wet 90%-percentile year) and create files\n ayc.create_timeseries_scenarios(resample_rule=\"1H\",\n resample_type=\"interpolate\")\n\n def postprocessing(self,key=\"None\",**kwargs):\n \"\"\"\n \"\"\"\n # select specific key if needed\n if key != \"\" and key!=\"None\":\n runs = [i for i in self.runlist if i.key == key and i.postprocessing == True]\n else:\n runs = [i for i in self.runlist if i.postprocessing == True]\n # conduct post-processing\n for run in runs:\n self.__postprocessing(run,**kwargs)\n \n def __postprocessing(self,run,\n stats = True,\n zero_flow = True,\n performance = False,\n catchment_hydrology = False,\n catchment_efate=False,\n branch_hydrology_and_efate=False,\n reach_hydrology_and_efate=False,\n catchment_validation=False,\n plot_percentile_over_time=False,\n ):\n \"\"\"\n Conducts post-processing.\n \n :param run: Modelrun settings related to runlist.csv\n :type run: modelrun\n \n :returns: - \n :rtype: -\n \"\"\"\n \n # create post-processing class\n pstPrc = LandscapeModel.utils.PostProcessing(os.path.join(run.fpath,run.key),\n run.key,time_format=\"%Y-%m-%dT%H:%M\")\n \n if stats:\n # calculate stats and save file\n stats = pstPrc.get_stats(stats= ['mean', 'median',\"min\",\"max\"],\n params=['depth', 'volume', 'flow', 'area'])\n \n # make plots\n pstPrc.map_stats(stats)\n \n # plot percentiles of variables\n if plot_percentile_over_time:\n pstPrc.plot_percentile_over_time(params=['depth', 'volume', 'flow', 'area'])\n \n if zero_flow:\n # get all reaches with at least one value with flow == 0 and make a plot.\n pstPrc.get_zero_flow_reaches(stats)\n \n if performance:\n # plot observed versus simulated flow\n pstPrc.performance(\"flow\")\n \n if catchment_hydrology:\n # plot histogramm of hydrological parameter across catchment\n pstPrc.catchment_hydrology()\n \n if catchment_efate:\n # plot cumulative distribution function of PEC values across catchment\n pstPrc.catchment_efate(datetime(1900,5,10,10),[1,2,4,8,16,24],\n maxSW=.4,maxSED=.05)\n \n if branch_hydrology_and_efate:\n # plot 3-D plot of two reach variables, e.g. 
PEC_SW and flow\n pstPrc.branch_hydrology_and_efate(\"PEC_SW\",\n \"PEC$_{SW}$ [$\\mu$g L$^{-1}$]\",\n \"flow\",\n \"Flow [m³ sec$^{-1}$]\",\n reach_start=\"r1443\",reach_end=\"r1438\",\n tstart = datetime(1900,5,2,8),\n tend = datetime(1900,5,3,23),\n tintverval = 4,\n timescale=\"hourly\",\n vmin=None,vmax=None,fname=\"1\")\n if reach_hydrology_and_efate:\n # plot resutls of single reach \n pstPrc.reach_hydrology_and_efate(\"r1443\",tstart=datetime(1900,5,2,8),\n tend=datetime(1900,5,3,23),\n ymax=[0.15,4,0.5,20],\n maxconc=5.,\n maxload=5.) \n\n if catchment_validation:\n pstPrc.catchment_validation()\n\n def message(self,s):\n \"\"\"\n Writes a user message.\n \"\"\"\n print(s)\n\n def split(self,a, n):\n \"\"\"\n Allocates a set of a modelruns to n cores.\n a (int): count of modelruns\n n (int): count of cores\n \n Returns (list):\n List with number of runs per sub-process.\n \"\"\"\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in iter(range(n)))\n\n def __create_batchfiles(self,ncores=2,pythonpath=\"\",execute = True):\n \"\"\"\n Creates a batch process to run several single projects in parallel. The\n batch process is governed by a master batch-file which calls 1 to n \n slave - batch files. Each slave is run in its own separated process. A \n slave itself contain 1 - n projects.\n \n ncores (int): Nubmer of cores available for modelling (= nubmer of slaves)\n pythonpath (string): path of python.exe\n execute (boolean): if True, the master batch-file is called with subpross.Popen\n \"\"\"\n names = []\n filenames=[]\n for chunk in list(self.split(range(len(self.runList)), ncores)):\n batch = []\n batch.append(\"@echo off\")\n if len(chunk)>0:\n for c in chunk:\n filename = self.__getExecutable(self.fdir,self.runList[c].name) \n filenames.append(filename)\n batch.append('call '+ pythonpath +' ' + '\"' + self.fdir +os.sep+ filename + '\"') \n #create batch-file\n name = str(min(chunk)+1) + \"_\" + str(max(chunk)+1)\n f = open(os.path.join(self.fdir,\"slave_\" + name + \".cmd\"),\"w\")\n f.write(\"\\n\".join(batch))\n f.close() \n names.append(name) \n\n # preapre master batch-file to start slaves\n #create master batch-self\n f = open(os.path.join(self.fdir,\"master_run.cmd\"),\"w\")\n f.write(\"\\n\".join([\"start slave_\" + name + \".cmd\" for name in names]))\n f.close() \n\n #make runs \n if execute:\n os.chdir(self.fdir)\n process = subprocess.Popen(self.fdir + os.sep + \"master_run.cmd\", shell=True, stdin=None, stdout=None, stderr=None, close_fds=True)\n out, err = process.communicate()\n process.wait()\n\n def __getExecutable(self,fdir,runname):\n s=[]\n s.append('import os')\n s.append('import LandscapeModel')\n s.append('if __name__ == \"__main__\":')\n s.append('\\tfdir = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-1])')\n s.append('\\trunFactory = LandscapeModel.utils.RunFactory(fdir=r\"' + self.fdir + '\",fname=\"' + self.fname + '\")')\n s.append('\\trunFactory(\"' + runname + '\")')\n s=\"\\n\".join(s)\n filename = \"main_\"+runname+\".py\"\n f = open(os.path.join(fdir,filename),\"w\")\n f.write(s)\n f.close()\n return filename\n \n ", "repo_name": "xlandscape/CmfContinuous-Component", "sub_path": "module/bin/LandscapeModel/utils/RunFactory.py", "file_name": "RunFactory.py", "file_ext": "py", "file_size_in_byte": 12725, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "77", "api": [{"api_name": "LandscapeModel.utils.ParameterList", "line_number": 22, 
"usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 22, "usage_type": "attribute"}, {"api_name": "LandscapeModel.Catchment", "line_number": 38, "usage_type": "call"}, {"api_name": "LandscapeModel.Catchment", "line_number": 63, "usage_type": "call"}, {"api_name": "LandscapeModel.utils.Plotting", "line_number": 66, "usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 69, "usage_type": "call"}, {"api_name": "os.path", "line_number": 69, "usage_type": "attribute"}, {"api_name": "LandscapeModel.utils.CatchmentSeparator", "line_number": 76, "usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 76, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 103, "usage_type": "call"}, {"api_name": "os.path", "line_number": 103, "usage_type": "attribute"}, {"api_name": "LandscapeModel.utils.PreProcessing.CatchmentConnector", "line_number": 109, "usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 109, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 114, "usage_type": "call"}, {"api_name": "os.path", "line_number": 114, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 116, "usage_type": "call"}, {"api_name": "os.path", "line_number": 116, "usage_type": "attribute"}, {"api_name": "LandscapeModel.utils.PreProcessing.AreaYieldCatchment", "line_number": 121, "usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 121, "usage_type": "attribute"}, {"api_name": "LandscapeModel.utils.PostProcessing", "line_number": 171, "usage_type": "call"}, {"api_name": "LandscapeModel.utils", "line_number": 171, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 171, "usage_type": "call"}, {"api_name": "os.path", "line_number": 171, "usage_type": "attribute"}, {"api_name": "datetime.datetime", "line_number": 200, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 210, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 211, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 217, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 218, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 264, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 267, "usage_type": "call"}, {"api_name": "os.path", "line_number": 267, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 274, "usage_type": "call"}, {"api_name": "os.path", "line_number": 274, "usage_type": "attribute"}, {"api_name": "os.chdir", "line_number": 280, "usage_type": "call"}, {"api_name": "subprocess.Popen", "line_number": 281, "usage_type": "call"}, {"api_name": "os.sep", "line_number": 281, "usage_type": "attribute"}, {"api_name": "os.path.join", "line_number": 295, "usage_type": "call"}, {"api_name": "os.path", "line_number": 295, "usage_type": "attribute"}]} +{"seq_id": "38638882208", "text": "import pytest\r\nfrom wordle_solver.solver import Solver\r\n\r\nclass TestBase:\r\n @pytest.fixture(autouse=True)\r\n def _solver(self):\r\n word_list = ['abc', 'cde', 'def']\r\n self.solver = Solver(word_list, alphabet='abcdef')\r\n\r\n @pytest.fixture(autouse=True)\r\n def _useful_setup_variables(self):\r\n self.list_of_empty_sets = [set(), set(), set()]\r\n self.state_abc = ['a', 'b', 'c']\r\n self.state_ab_None = ['a', 'b', None]\r\n\r\nclass TestAvailableLetters(TestBase):\r\n def 
test_without_restrictions(self):\r\n        warm_letters = self.list_of_empty_sets\r\n        available_letters = self.solver.available_letters(warm_letters, set())\r\n        for position in available_letters:\r\n            assert sorted(position) == list('abcdef')\r\n\r\n    def test_with_warm_letters(self):\r\n        warm_letters = [{'a', 'b'}, {'c'}, set()]\r\n        available_letters = self.solver.available_letters(warm_letters, set())\r\n        assert sorted(available_letters[0]) == list('cdef')\r\n        assert sorted(available_letters[1]) == list('abdef')\r\n        assert sorted(available_letters[2]) == list('abcdef')\r\n\r\n    def test_with_cold_letters(self):\r\n        warm_letters = self.list_of_empty_sets\r\n        cold_letters = {'a', 'b'}\r\n        available_letters = self.solver.available_letters(warm_letters, cold_letters)\r\n        for position in available_letters:\r\n            assert sorted(position) == list('cdef')\r\n\r\n    def test_with_warm_and_cold_letters(self):\r\n        warm_letters = [{'b'}, set(), {'d'}]\r\n        cold_letters = {'a'}\r\n        available_letters = self.solver.available_letters(warm_letters, cold_letters)\r\n        assert sorted(available_letters[0]) == list('cdef')\r\n        assert sorted(available_letters[1]) == list('bcdef')\r\n        assert sorted(available_letters[2]) == list('bcef')\r\n\r\nclass TestTransition(TestBase):\r\n    def test_updates_value(self):\r\n        new_state = self.solver.transition(self.state_ab_None, 'c', 2)\r\n        assert ''.join(new_state) == 'abc'\r\n\r\n    def test_maintains_original(self):\r\n        state = [None]\r\n        new_state = self.solver.transition(state, 'a', 0)\r\n        assert state[0] is None\r\n\r\nclass TestSuccessors(TestBase):\r\n    def test_terminal_state(self):\r\n        available_letters = [{'d'}] * 3\r\n        successors = self.solver.successors(self.state_abc, available_letters)\r\n        assert len(list(successors)) == 0\r\n\r\n    def test_nonterminal_state(self):\r\n        state = ['a', None]\r\n        available_letters = [{'b', 'c', 'd'}] * 2\r\n        successors = self.solver.successors(state, available_letters)\r\n        assert sorted(successors) == [['a', 'b'], ['a', 'c'], ['a', 'd']]\r\n\r\nclass TestIsValid(TestBase):\r\n    def test_valid_state_no_requirements(self):\r\n        warm_letters = self.list_of_empty_sets\r\n        is_valid = self.solver.is_valid(self.state_abc, warm_letters)\r\n        assert is_valid\r\n\r\n    def test_valid_state_with_requirements(self):\r\n        warm_letters = [set(), {'c'}, {'a'}]\r\n        is_valid = self.solver.is_valid(self.state_abc, warm_letters)\r\n        assert is_valid\r\n\r\n    def test_invalid_state_with_requirements(self):\r\n        warm_letters = self.list_of_empty_sets\r\n        warm_letters[0] = {'d'}\r\n        is_valid = self.solver.is_valid(self.state_abc, warm_letters)\r\n        assert not is_valid\r\n\r\nclass TestIsTerminal(TestBase):\r\n    def test_terminal_state(self):\r\n        is_terminal = self.solver.is_terminal(self.state_abc)\r\n        assert is_terminal\r\n\r\n    def test_nonterminal_state(self):\r\n        is_terminal = self.solver.is_terminal(self.state_ab_None)\r\n        assert not is_terminal\r\n\r\nclass TestSearch(TestBase):\r\n    def test_no_starting_info(self):\r\n        initial_state = [None] * 3\r\n        warm_letters = self.list_of_empty_sets\r\n        possible_words = self.solver.search(initial_state, warm_letters, set())\r\n        assert len(possible_words) == len(self.solver.alphabet) ** self.solver.word_length\r\n\r\n    def test_with_cold_letters(self):\r\n        initial_state = self.state_ab_None\r\n        warm_letters = self.list_of_empty_sets\r\n        cold_letters = {'e', 'f'}\r\n        possible_words = self.solver.search(initial_state, warm_letters, cold_letters)\r\n        assert sorted(possible_words) == ['aba', 'abb', 'abc', 'abd']\r\n\r\n    def 
\r\n\r\n def test_with_warm_and_cold_letters(self):\r\n initial_state = self.state_ab_None\r\n warm_letters = self.list_of_empty_sets\r\n warm_letters[0] = {'c'}\r\n cold_letters = {'e', 'f'}\r\n possible_words = self.solver.search(initial_state, warm_letters, cold_letters)\r\n assert sorted(possible_words) == ['abc']\r\n\r\nclass TestPossibleAnswers(TestBase):\r\n def test_no_starting_info(self):\r\n initial_state = [None] * 3\r\n warm_letters = self.list_of_empty_sets\r\n possible_answers = self.solver.possible_answers(initial_state, warm_letters, set())\r\n assert sorted(possible_answers) == sorted(self.solver.master_word_list)\r\n\r\n def test_with_starting_info(self):\r\n initial_state = self.state_ab_None\r\n warm_letters = self.list_of_empty_sets\r\n warm_letters[0] = {'c'}\r\n cold_letters = {'e', 'f'}\r\n possible_answers = self.solver.possible_answers(initial_state, warm_letters, cold_letters)\r\n assert sorted(possible_answers) == ['abc']\r\n", "repo_name": "jakeoeding/wordle-solver", "sub_path": "tests/solver_test.py", "file_name": "solver_test.py", "file_ext": "py", "file_size_in_byte": 5327, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "wordle_solver.solver.Solver", "line_number": 8, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 5, "usage_type": "call"}, {"api_name": "pytest.fixture", "line_number": 10, "usage_type": "call"}]}
+{"seq_id": "13911412360", "text": "import random\n\nimport config\n\nclass Figure:\n def __init__(self, grid):\n # Pick a random figure and fetch its orientations\n self.figurePositions = config.FIGURES[random.randint(1, len(config.FIGURES))]\n # The default orientation index is 0\n self.figurePosition = 0\n # Select that orientation\n self.figure = self.figurePositions[self.figurePosition]\n # Set the figure's starting coordinates\n self.figureXY = {\n 'x1': 4,\n 'y1': 0,\n 'x2': len(self.figure[0])+4,\n 'y2': len(self.figure),\n }\n\n self.grid = grid\n self.statusDecline = True\n\n\n def declineMethod(self):\n ''' Lowers the figure by increasing the coordinates of its projection onto the grid '''\n self.figureXY['y1'] += 1\n self.figureXY['y2'] += 1\n\n self.grid.print_grid(self)\n\n def insertFigureInGrid(self):\n ''' Inserts the figure into the game grid '''\n\n figureRangeY = range(self.figureXY['y1'], self.figureXY['y2'])\n figureRangeX = range(self.figureXY['x1'], self.figureXY['x2'])\n\n for figureLineY, y in enumerate(figureRangeY):\n for figureLineX, x in enumerate(figureRangeX):\n if self.figure[figureLineY][figureLineX]:\n self.grid.matrix[y-1][x] = 1\n self.grid.cleanCollectedLine(y-1)\n\n def declineCheckConditions(self):\n '''\n Checks the line below the figure for blocks: if blocks are there, we drop\n the figure down, lock it in place and leave the figure's main work loop;\n if there are none, we update its visible coordinates (y1, y2).\n '''\n try:\n # Take the line located right below the figure\n # mainline = self.grid.matrix[self.figureXY['y2']+1]\n # Walks over the grid line below and compares it with the figure's\n # bottom line; if a pair of cells are both 1, it aborts the function\n # and sets the figure's dropping status to False.\n for figureLineY, gridY in enumerate(range(self.figureXY['y1'], self.figureXY['y2'])):\n for figureLineX, gridX in enumerate(range(self.figureXY['x1'],self.figureXY['x2'])):\n if self.grid.matrix[gridY+1][gridX] and self.figure[figureLineY][figureLineX]:\n self.statusDecline = False\n return\n\n # Lower the figure\n self.declineMethod()\n except IndexError:\n # Reacts to hitting the bottom of the well: aborts execution and\n # flips the figure's falling status\n self.statusDecline = False\n return\n\n\n def moving(self, side):\n ''' Moves the figure right or left if nothing blocks it on that side '''\n\n # figureLineY is for the for-loop that iterates over all the grid lines\n # (by Yn) within which the figure sits (collected in mainlines and\n # accessed through yline).\n\n mainlines = [self.grid.matrix[y] for y in range(self.figureXY['y1'], self.figureXY['y2'])]\n\n figureLineY = 0\n\n if side == 'left':\n # Index of the block to the left of the figure's block, checked in\n # case we try to step outside the grid\n xIndexChar = self.figureXY['x1'] - 1\n if xIndexChar < 0:\n return False\n\n for yline in mainlines:\n xchar = yline[xIndexChar]\n\n # Check that no block on the left is in the way.\n if xchar == 1 and self.figure[figureLineY][0] == 1:\n return False\n figureLineY += 1\n\n # Move left\n self.figureXY['x1'] -= 1\n self.figureXY['x2'] -= 1\n\n elif side == 'right':\n # Index of the block to the right of the figure's block, checked in\n # case we try to step outside the grid\n xIndexChar = self.figureXY['x2']\n if xIndexChar > config.WEIGHT_GRID-1:\n return False\n\n for yline in mainlines:\n xchar = yline[xIndexChar]\n\n # Check that no block on the right is in the way.\n if xchar == 1 and self.figure[figureLineY][-1] == 1:\n return False\n figureLineY += 1\n\n # Move right\n self.figureXY['x1'] += 1\n self.figureXY['x2'] += 1\n\n self.grid.print_grid(self)\n\n def coup(self):\n ''' Switches the figure to its next orientation '''\n\n # Increment the orientation index\n self.figurePosition += 1\n\n # Fetch that orientation and assign it to the figure; if it does not\n # exist, fall back to the default orientation\n try:\n self.figure = self.figurePositions[self.figurePosition]\n except IndexError:\n self.figurePosition = 0\n self.figure = self.figurePositions[self.figurePosition]\n\n # Recompute the figure's coordinates, since its size may change when\n # the orientation changes.\n self.figureXY['x2'] = len(self.figure[-1]) + self.figureXY['x1']\n self.figureXY['y2'] = len(self.figure) + self.figureXY['y1']\n\n # Redraw immediately so the user does not have to wait for the new\n # orientation to appear.\n self.grid.print_grid(self)\n\n def checkLose(self):\n figureRangeY = range(self.figureXY['y1'], self.figureXY['y2'])\n figureRangeX = range(self.figureXY['x1'], self.figureXY['x2'])\n\n for yCoord in figureRangeY:\n for xCoord in figureRangeX:\n if self.grid.matrix[yCoord][xCoord] == 1:\n exit('\\nLosing!')
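\n\n# --- Editor's sketch (hypothetical, not from the original repo): a minimal\n# smoke test. It assumes config.FIGURES maps integer keys 1..N to lists of\n# 0/1 matrices, and it stubs out the Grid object the class expects.\nif __name__ == '__main__':\n class _StubGrid:\n def __init__(self, width=10, height=20):\n self.matrix = [[0] * width for _ in range(height)]\n def print_grid(self, figure):\n pass # rendering is omitted in this sketch\n\n config.FIGURES = {1: [[[1, 1], [1, 1]]]} # a single square piece\n figure = Figure(_StubGrid())\n figure.moving('left') # shifts one column left unless something blocks it\n figure.coup() # cycles back to the stored orientation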
\n", "repo_name": "DUZA-dev/Tetris-in-terminal", "sub_path": "figure.py", "file_name": "figure.py", "file_ext": "py", "file_size_in_byte": 7010, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "config.FIGURES", "line_number": 8, "usage_type": "attribute"}, {"api_name": "random.randint", "line_number": 8, "usage_type": "call"}, {"api_name": "config.WEIGHT_GRID", "line_number": 105, "usage_type": "attribute"}]}
+{"seq_id": "38020077297", "text": "import argparse\nimport copy\nimport numpy as np\nimport os\nfrom pathlib import Path\nimport pandapower.plotting as ppl\nimport pandapower as pp\nimport pandapower.networks as pn\nimport pandapower.toolbox as tb\nimport random\nimport string\nimport subgraphs_methods\nimport time\nimport warnings\n# Suppress FutureWarning\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\n\n\ndef generate():\n arguments = get_arguments()\n x, t = create_networks(arguments)\n print(f\"{x} networks created in {t:0.2f} seconds\")\n\n\ndef get_arguments():\n parser = argparse.ArgumentParser(\n prog=\"power network subgraph generator\",\n description=\"Generates a specified number of subnetworks from a power network\",\n )\n parser.add_argument(\"network\", choices=['case4gs', 'case5', 'case6ww', 'case9', 'case14', 'case24_ieee_rts', 'case30', 'case_ieee30', 'case39', 'case57', 'case89pegase', 'case118', 'case145', 'case_illinois200', 'case300', 'case1354pegase', 'case1888rte', 'case2848rte', 'case2869pegase', 'case3120sp', 'case6470rte', 'case6495rte', 'case6515rte', 'case9241', 'GBnetwork', 'GBreducednetwork', 'iceland'])\n parser.add_argument(\"-n\", \"--num_subgraphs\", type=int, default=10)\n\n # if file is moved in another directory level relative to the root (currently in root/data_generation), this needs to be changed\n root_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n parser.add_argument(\"-s\", \"--save_dir\", default=root_directory + \"/Data\") \n\n parser.add_argument(\"--min_size\", type=int, default=5)\n parser.add_argument(\"--max_size\", type=int, default=30)\n # parser.add_argument(\"--n_1\", type=bool, default=False)\n parser.add_argument(\"--subgraphing_method\", choices=['rnd_neighbor', 'bfs', 'rnd_walk', 'partitioning'], default='rnd_neighbor')\n\n args = parser.parse_args()\n print(args)\n return args\n\n\ndef get_network(network_name):\n if network_name == 'case4gs':\n network = pn.case4gs()\n elif network_name == 'case5':\n network = pn.case5()\n elif network_name == 'case6ww':\n network = pn.case6ww()\n elif network_name == 'case9':\n network = pn.case9()\n elif network_name == 'case14':\n network = pn.case14()\n elif network_name == 'case24_ieee_rts':\n network = pn.case24_ieee_rts()\n elif network_name == 'case30':\n network = pn.case30()\n elif network_name == 'case_ieee30':\n network = pn.case_ieee30()\n elif network_name == 'case39':\n network = pn.case39()\n elif network_name == 'case57':\n network = pn.case57()\n elif network_name == 'case89pegase':\n network = pn.case89pegase()\n elif network_name == 'case118':\n network = pn.case118()\n elif network_name == 'case145':\n network = pn.case145()\n elif network_name == 'case_illinois200':\n network = pn.case_illinois200()\n elif network_name == 'case300':\n network = pn.case300()\n elif network_name == 'case1354pegase': \n network = pn.case1354pegase()\n elif network_name == 'case1888rte':\n network = pn.case1888rte()\n elif network_name == 'case2848rte':\n network = pn.case2848rte()\n elif network_name == 'case2869pegase': \n network = pn.case2869pegase()\n elif network_name == 'case3120sp':\n network = pn.case3120sp()\n elif network_name == 'case6470rte':\n network = pn.case6470rte()\n elif network_name == 'case6495rte':\n network = pn.case6495rte()\n elif network_name == 'case6515rte':\n network = pn.case6515rte()\n elif network_name == 'case9241':\n network = pn.case9241pegase()\n elif network_name == 'GBnetwork':\n network = pn.GBnetwork()\n elif network_name == 'GBreducednetwork':\n network = pn.GBreducednetwork()\n elif network_name == 'iceland':\n network = pn.iceland()\n return network\n
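\n# Editor's note: the chain above can be collapsed into a table-driven lookup\n# with identical behavior, e.g. (sketch):\n# _CASES = {'case4gs': pn.case4gs, 'case5': pn.case5, ..., 'iceland': pn.iceland}\n# def get_network(network_name):\n# return _CASES[network_name]()\n# The explicit chain is kept here to preserve the original structure.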
\n\ndef get_subgraphing_method(method_name):\n if method_name == 'rnd_neighbor':\n return subgraphs_methods.random_neighbor_selection\n elif method_name == 'bfs':\n return subgraphs_methods.bfs_neighbor_selection\n elif method_name == 'rnd_walk':\n return subgraphs_methods.random_walk_neighbor_selection\n elif method_name == 'partitioning':\n return subgraphs_methods.partition_graph\n\n\ndef create_networks(arguments):\n start = time.perf_counter()\n full_net = get_network(arguments.network)\n\n # the external grid is plotted as a yellow rectangle, buses are blue dots, loads are black triangles with tip pointing down\n # transformers are red overlapping circles, generators are black circle with a small black propeller in the middle \n # ppl.simple_plot(full_net, plot_loads=True, plot_gens=True, trafo_color=\"r\", switch_color=\"g\") \n\n subgraphing_method = get_subgraphing_method(arguments.subgraphing_method)\n n_subgraph_generated = 0\n if arguments.subgraphing_method == 'partitioning':\n all_partitions_busses = subgraphing_method(full_net)\n for partition_busses in all_partitions_busses:\n is_subgraph_solved = solve_and_save(full_net, partition_busses, arguments)\n if is_subgraph_solved:\n n_subgraph_generated += 1\n else:\n # A starting point is any bus that is connected to a generator to ensure that subgraphs contain at least one generator\n starting_points = full_net.gen.bus\n while n_subgraph_generated < arguments.num_subgraphs:\n print(f\"generating network {n_subgraph_generated + 1}\")\n # if arguments.n_1:\n # subgraph_busses = list(full_net.bus.index)\n # downed_bus = np.random.randint(0, len(subgraph_busses))\n # del subgraph_busses[downed_bus]\n \n # else:\n subgraph_length = np.random.randint(arguments.min_size, min(arguments.max_size, len(full_net.bus)))\n initial_bus = starting_points[np.random.randint(0, len(starting_points))]\n subgraph_busses = subgraphing_method(full_net, initial_bus, subgraph_length)\n \n is_subgraph_solved = solve_and_save(full_net, subgraph_busses, arguments)\n \n if is_subgraph_solved:\n n_subgraph_generated += 1\n\n end = time.perf_counter()\n return n_subgraph_generated, end - start\n\n\ndef solve_and_save(full_net, subgraph_busses, arguments):\n subgraph_net = tb.select_subnet(full_net, subgraph_busses)\n subgraph_length = len(subgraph_busses)\n\n try:\n subgraph_net = modify_network_values(subgraph_net)\n subgraph_net.sn_mva = full_net.sn_mva\n # check if the subgraph contains a slack bus, if not add one by setting the slack bus to a random bus\n # if full_net.ext_grid.bus.item() not in subgraph_busses:\n # slack_bus = subgraph_busses[np.random.randint(0, len(subgraph_busses))]\n # # https://pandapower.readthedocs.io/en/v2.1.0/elements/ext_grid.html#pandapower.create_ext_grid\n # pp.create_ext_grid(subgraph_net, slack_bus)\n pp.runpp(subgraph_net, numba = False)\n # ppl.simple_plot(subgraph_net, plot_loads=True, plot_gens=True, trafo_color=\"r\", switch_color=\"g\") \n\n except Exception:\n print(\"Network not solvable, trying a new one\")\n return False\n\n uid = ''.join([random.choice(string.ascii_letters\n + string.digits) for _ in range(8)])\n \n Path(f\"{arguments.save_dir}/x\").mkdir(parents=True, exist_ok=True)\n Path(f\"{arguments.save_dir}/y\").mkdir(parents=True, exist_ok=True)\n \n pp.to_json(subgraph_net, f\"{arguments.save_dir}/x/{arguments.network}_{subgraph_length}_{arguments.subgraphing_method}_{uid}.json\")\n subgraph_net.res_gen.to_csv(f\"{arguments.save_dir}/y/{arguments.network}_{subgraph_length}_{arguments.subgraphing_method}_{uid}_gen.csv\")\n subgraph_net.res_line.to_csv(f\"{arguments.save_dir}/y/{arguments.network}_{subgraph_length}_{arguments.subgraphing_method}_{uid}_line.csv\")\n subgraph_net.res_bus.to_csv(f\"{arguments.save_dir}/y/{arguments.network}_{subgraph_length}_{arguments.subgraphing_method}_{uid}_bus.csv\")\n\n return 
True\n\n\ndef modify_network_values(netw):\n # Set leakage (shunt) values to 0\n netw.line['c_nf_per_km'] = 0.0\n netw.line['g_us_per_km'] = 0.0\n netw.shunt = netw.shunt[0:0]\n\n # Set transformer leakage values to 0\n netw.trafo['vkr_percent'] = 0.0\n netw.trafo['pfe_kw'] = 0.0\n netw.trafo['i0_percent'] = 0.0\n netw.trafo['tap_step_percent'] = 0.0\n return netw\n\n\nif __name__ == \"__main__\":\n generate()", "repo_name": "AlexDeLos/GNN_OPF", "sub_path": "data_generation/generate.py", "file_name": "generate.py", "file_ext": "py", "file_size_in_byte": 8439, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "77", "api": [{"api_name": "warnings.filterwarnings", "line_number": 16, "usage_type": "call"}, {"api_name": "argparse.ArgumentParser", "line_number": 26, "usage_type": "call"}, {"api_name": "os.path.dirname", "line_number": 34, "usage_type": "call"}, {"api_name": "os.path", "line_number": 34, "usage_type": "attribute"}, {"api_name": "os.path.abspath", "line_number": 34, "usage_type": "call"}, {"api_name": "pandapower.networks.case4gs", "line_number": 49, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 49, "usage_type": "name"}, {"api_name": "pandapower.networks.case5", "line_number": 51, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 51, "usage_type": "name"}, {"api_name": "pandapower.networks.case6ww", "line_number": 53, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 53, "usage_type": "name"}, {"api_name": "pandapower.networks.case14", "line_number": 55, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 55, "usage_type": "name"}, {"api_name": "pandapower.networks.case24_ieee_rts", "line_number": 57, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 57, "usage_type": "name"}, {"api_name": "pandapower.networks.case30", "line_number": 59, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 59, "usage_type": "name"}, {"api_name": "pandapower.networks.case_ieee30", "line_number": 61, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 61, "usage_type": "name"}, {"api_name": "pandapower.networks.case39", "line_number": 63, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 63, "usage_type": "name"}, {"api_name": "pandapower.networks.case57", "line_number": 65, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 65, "usage_type": "name"}, {"api_name": "pandapower.networks.case89pegase", "line_number": 67, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 67, "usage_type": "name"}, {"api_name": "pandapower.networks.case118", "line_number": 69, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 69, "usage_type": "name"}, {"api_name": "pandapower.networks.case145", "line_number": 71, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 71, "usage_type": "name"}, {"api_name": "pandapower.networks.case_illinois200", "line_number": 73, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 73, "usage_type": "name"}, {"api_name": "pandapower.networks.case300", "line_number": 75, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 75, "usage_type": "name"}, {"api_name": "pandapower.networks.case1354pegase", "line_number": 77, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 77, "usage_type": "name"}, 
{"api_name": "pandapower.networks.case1888rte", "line_number": 79, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 79, "usage_type": "name"}, {"api_name": "pandapower.networks.case2848rte", "line_number": 81, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 81, "usage_type": "name"}, {"api_name": "pandapower.networks.case2869pegase", "line_number": 83, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 83, "usage_type": "name"}, {"api_name": "pandapower.networks.case3120sp", "line_number": 85, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 85, "usage_type": "name"}, {"api_name": "pandapower.networks.case6470rte", "line_number": 87, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 87, "usage_type": "name"}, {"api_name": "pandapower.networks.case6495rte", "line_number": 89, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 89, "usage_type": "name"}, {"api_name": "pandapower.networks.case6515rte", "line_number": 91, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 91, "usage_type": "name"}, {"api_name": "pandapower.networks.case9241", "line_number": 93, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 93, "usage_type": "name"}, {"api_name": "pandapower.networks.GBnetwork", "line_number": 95, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 95, "usage_type": "name"}, {"api_name": "pandapower.networks.GBreducednetwork", "line_number": 97, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 97, "usage_type": "name"}, {"api_name": "pandapower.networks.iceland", "line_number": 99, "usage_type": "call"}, {"api_name": "pandapower.networks", "line_number": 99, "usage_type": "name"}, {"api_name": "subgraphs_methods.random_neighbor_selection", "line_number": 105, "usage_type": "attribute"}, {"api_name": "subgraphs_methods.bfs_neighbor_selection", "line_number": 107, "usage_type": "attribute"}, {"api_name": "subgraphs_methods.random_walk_neighbor_selection", "line_number": 109, "usage_type": "attribute"}, {"api_name": "subgraphs_methods.partition_graph", "line_number": 111, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 115, "usage_type": "call"}, {"api_name": "numpy.random.randint", "line_number": 141, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 141, "usage_type": "attribute"}, {"api_name": "numpy.random.randint", "line_number": 142, "usage_type": "call"}, {"api_name": "numpy.random", "line_number": 142, "usage_type": "attribute"}, {"api_name": "time.perf_counter", "line_number": 150, "usage_type": "call"}, {"api_name": "pandapower.toolbox.select_subnet", "line_number": 155, "usage_type": "call"}, {"api_name": "pandapower.toolbox", "line_number": 155, "usage_type": "name"}, {"api_name": "pandapower.runpp", "line_number": 166, "usage_type": "call"}, {"api_name": "random.choice", "line_number": 173, "usage_type": "call"}, {"api_name": "string.ascii_letters", "line_number": 173, "usage_type": "attribute"}, {"api_name": "string.digits", "line_number": 174, "usage_type": "attribute"}, {"api_name": "pathlib.Path", "line_number": 176, "usage_type": "call"}, {"api_name": "pathlib.Path", "line_number": 177, "usage_type": "call"}, {"api_name": "pandapower.to_json", "line_number": 179, "usage_type": "call"}]} +{"seq_id": "30796316321", "text": "from .base import Base\nfrom sqlalchemy import Column, BIGINT, String, 
TIMESTAMP, func, text, ForeignKey\nfrom .user import User\n\nclass Shop(Base):\n __tablename__ = \"shop\"\n\n id = Column(BIGINT, autoincrement=True, primary_key=True)\n name = Column(String(50), nullable=False)\n owner_id = Column(BIGINT, ForeignKey(\"user.id\", ondelete=\"CASCADE\", name=\"shop_owner_fk\"), nullable=False) \n created_at = Column(TIMESTAMP, server_default=func.now())\n updated_at = Column(TIMESTAMP, server_default=text(\"CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP\"))\n\n __table_args__ = ()
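\n\n# --- Editor's sketch (hypothetical usage, not part of the original module).\n# Note that the updated_at server_default uses MySQL-only 'ON UPDATE\n# CURRENT_TIMESTAMP' syntax, so the table is illustrated against a MySQL engine:\n#\n# from sqlalchemy import create_engine\n# from sqlalchemy.orm import Session\n# engine = create_engine('mysql+pymysql://user:pass@localhost/app') # hypothetical DSN\n# Base.metadata.create_all(engine) # the User model must be imported as well\n# with Session(engine) as session:\n# session.add(Shop(name='corner store', owner_id=1))\n# session.commit()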
\n", "repo_name": "msyamsula/SyamsulApp-alembic", "sub_path": "models/shop.py", "file_name": "shop.py", "file_ext": "py", "file_size_in_byte": 611, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "base.Base", "line_number": 5, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 8, "usage_type": "argument"}, {"api_name": "sqlalchemy.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.String", "line_number": 9, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.BIGINT", "line_number": 10, "usage_type": "argument"}, {"api_name": "sqlalchemy.ForeignKey", "line_number": 10, "usage_type": "call"}, {"api_name": "sqlalchemy.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 11, "usage_type": "argument"}, {"api_name": "sqlalchemy.func.now", "line_number": 11, "usage_type": "call"}, {"api_name": "sqlalchemy.func", "line_number": 11, "usage_type": "name"}, {"api_name": "sqlalchemy.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sqlalchemy.TIMESTAMP", "line_number": 12, "usage_type": "argument"}, {"api_name": "sqlalchemy.text", "line_number": 12, "usage_type": "call"}]}
+{"seq_id": "3409587731", "text": "from sql_alchemy import banco\nfrom datetime import datetime\nfrom resources.extend_date import extend_license\n\n\nclass FoodModel(banco.Model):\n __tablename__ = 'food'\n food_id = banco.Column(banco.Integer, primary_key = True)\n user_id = banco.Column(banco.Integer, banco.ForeignKey('users.user_id')) # add a relationship on the Users table\n barcode = banco.Column(banco.String(13))\n name = banco.Column(banco.String(80))\n brand = banco.Column(banco.String(80))\n created_in = banco.Column(banco.String(30))\n updated_in = banco.Column(banco.String(30))\n description = banco.Column(banco.String(30))\n ingredients = banco.Column(banco.String(300))\n serving_unit = banco.Column(banco.String(10))\n serving_amount = banco.Column(banco.Float(precision = 2))\n calories = banco.Column(banco.Integer)\n carbohydrate = banco.Column(banco.Float(precision = 2))\n protein = banco.Column(banco.Float(precision = 2))\n total_fat = banco.Column(banco.Float(precision = 2))\n saturated_fat = banco.Column(banco.Float(precision = 2))\n polyunsaturated_fat = banco.Column(banco.Float(precision = 2))\n monounsaturated_fat = banco.Column(banco.Float(precision = 2))\n trans_fat = banco.Column(banco.Float(precision = 2))\n cholesterol = banco.Column(banco.Float(precision = 2))\n sodium = banco.Column(banco.Float(precision = 2))\n fiber = banco.Column(banco.Float(precision = 2))\n sugar = banco.Column(banco.Float(precision = 2))\n vitamin_a = banco.Column(banco.Float(precision = 2)) \n vitamin_b1 = banco.Column(banco.Float(precision = 2))\n vitamin_b12 = banco.Column(banco.Float(precision = 2))\n vitamin_c = banco.Column(banco.Float(precision = 2))\n vitamin_d = banco.Column(banco.Float(precision = 2))\n vitamin_e = banco.Column(banco.Float(precision = 2))\n vitamin_k = banco.Column(banco.Float(precision = 2))\n potassium = banco.Column(banco.Float(precision = 2))\n zync = banco.Column(banco.Float(precision = 2))\n magnesium = banco.Column(banco.Float(precision = 2)) \n iron = banco.Column(banco.Float(precision = 2)) \n chromium = banco.Column(banco.Float(precision = 2)) \n\n \n def __init__(self, user_id, barcode, name, brand, description,\\\n ingredients, serving_unit, serving_amount, calories, carbohydrate, protein,\\\n total_fat, saturated_fat, polyunsaturated_fat, monounsaturated_fat, trans_fat, \\\n cholesterol, sodium, fiber, sugar, vitamin_a, vitamin_b1, vitamin_b12, vitamin_c,\\\n vitamin_d, vitamin_e, vitamin_k, potassium, zync, magnesium, iron, chromium ):\n self.user_id = user_id \n self.barcode = barcode \n self.name = name \n self.brand = brand \n self.created_in = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") \n self.updated_in = None \n self.description = description \n self.ingredients = ingredients\n self.serving_unit = serving_unit\n self.serving_amount = serving_amount \n self.calories = calories \n self.carbohydrate = carbohydrate \n self.protein = protein \n self.total_fat = total_fat \n self.saturated_fat = saturated_fat \n self.polyunsaturated_fat = polyunsaturated_fat \n self.monounsaturated_fat = monounsaturated_fat \n self.trans_fat = trans_fat\n self.cholesterol = cholesterol \n self.sodium = sodium \n self.fiber = fiber \n self.sugar = sugar \n self.vitamin_a = vitamin_a \n self.vitamin_b1 = vitamin_b1 \n self.vitamin_b12 = vitamin_b12 \n self.vitamin_c = vitamin_c \n self.vitamin_d = vitamin_d \n self.vitamin_e = vitamin_e \n self.vitamin_k = vitamin_k \n self.potassium = potassium \n self.zync = zync \n self.magnesium = magnesium \n self.iron = iron \n self.chromium = chromium \n\n \n def json(self):\n return {\n 'food_id': self.food_id,\n 'user_id': self.user_id, \n 'barcode': self.barcode, \n 'name': self.name, \n 'brand': self.brand, \n 'created_in': self.created_in, \n 'updated_in': self.updated_in, \n 'description': self.description, \n 'ingredients': self.ingredients,\n 'serving_unit': self.serving_unit,\n 'serving_amount': self.serving_amount, \n 'calories': self.calories, \n 'carbohydrate': self.carbohydrate, \n 'protein': self.protein, \n 'total_fat': self.total_fat, \n 'saturated_fat': self.saturated_fat, \n 'polyunsaturated_fat': self.polyunsaturated_fat, \n 'monounsaturated_fat': self.monounsaturated_fat, \n 'trans_fat': self.trans_fat,\n 'cholesterol': self.cholesterol, \n 'sodium': self.sodium, \n 'fiber': self.fiber, \n 'sugar': self.sugar, \n 'vitamin_a': self.vitamin_a,\n 'vitamin_b1': self.vitamin_b1, \n 'vitamin_b12': self.vitamin_b12, \n 'vitamin_c': self.vitamin_c, \n 'vitamin_d': self.vitamin_d, \n 'vitamin_e': self.vitamin_e, \n 'vitamin_k': self.vitamin_k, \n 'potassium': self.potassium, \n 'zync': self.zync, \n 'magnesium': self.magnesium, \n 'iron': self.iron, \n 'chromium': self.chromium\n }\n \n @classmethod\n def find_food(cls, food_id):\n with banco.session.no_autoflush:\n food = cls.query.filter_by(food_id = food_id).first()\n if food:\n return food\n return None\n \n @classmethod # the search happens here: a %like% query that returns the matching rows (ids and descriptions)\n def find_by_name(cls, name):\n return cls.query.filter(cls.name.ilike(f'%{name}%')).all()\n \n @classmethod\n def find_by_barcode(cls, barcode):\n 
food = cls.query.filter_by(barcode = barcode).first()\n if food:\n return food\n return None\n \n def update_food(self):\n self.updated_in = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\") \n \n def save_food(self):\n banco.session.add(self)\n banco.session.commit()\n \n def delete_food(self):\n banco.session.delete(self)\n banco.session.commit()\n \n ", "repo_name": "randallvictorflagg/NutrInput", "sub_path": "models/food.py", "file_name": "food.py", "file_ext": "py", "file_size_in_byte": 6346, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "sql_alchemy.banco.Model", "line_number": 6, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 6, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 8, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 8, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Integer", "line_number": 8, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 9, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 9, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Integer", "line_number": 9, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco.ForeignKey", "line_number": 9, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 10, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 10, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 10, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 11, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 11, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 11, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 12, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 12, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 12, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 13, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 13, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 13, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 14, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 14, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 14, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 15, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 15, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 15, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 16, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 16, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 16, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 17, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 17, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.String", "line_number": 17, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 18, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 18, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 18, "usage_type": "call"}, {"api_name": 
"sql_alchemy.banco.Column", "line_number": 19, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 19, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Integer", "line_number": 19, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 20, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 20, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 20, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 21, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 21, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 21, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 22, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 22, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 22, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 23, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 23, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 23, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 24, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 24, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 24, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 25, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 25, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 25, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 26, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 26, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 26, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 27, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 27, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 27, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 28, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 28, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 28, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 29, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 29, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 29, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 30, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 30, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 30, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 31, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 31, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 31, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 32, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 32, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 32, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 33, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 33, "usage_type": "name"}, {"api_name": 
"sql_alchemy.banco.Float", "line_number": 33, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 34, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 34, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 34, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 35, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 35, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 35, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 36, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 36, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 36, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 37, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 37, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 37, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 38, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 38, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 38, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 39, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 39, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 39, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 40, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 40, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 40, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 41, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 41, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 41, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.Column", "line_number": 42, "usage_type": "call"}, {"api_name": "sql_alchemy.banco", "line_number": 42, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.Float", "line_number": 42, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 54, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 54, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.session", "line_number": 127, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 127, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 145, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 145, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.session.add", "line_number": 148, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.session", "line_number": 148, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 148, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.session.commit", "line_number": 149, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.session", "line_number": 149, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 149, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.session.delete", "line_number": 152, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.session", "line_number": 152, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 152, "usage_type": "name"}, {"api_name": "sql_alchemy.banco.session.commit", 
"line_number": 153, "usage_type": "call"}, {"api_name": "sql_alchemy.banco.session", "line_number": 153, "usage_type": "attribute"}, {"api_name": "sql_alchemy.banco", "line_number": 153, "usage_type": "name"}]} +{"seq_id": "36886290725", "text": "from cgitb import grey\nfrom msilib.schema import Icon\nfrom string import whitespace\nfrom tkinter import *\nimport sqlite3\nfrom tkinter import messagebox\nfrom turtle import bgcolor\ncon =sqlite3.connect('crm.db')\ncur =con.cursor()\n\n\n\nclass Createclient(Toplevel):\n def __init__(self):\n Toplevel.__init__(self)\n self.title(\"CRM System\")\n self.geometry(\"700x550+350+200\")\n self.resizable(False,False)\n\n self.top= Frame(self,height=150)\n self.top.pack(fill=X)\n self.bottom= Frame(self,height=500)\n self.bottom.pack(fill=X)\n self.heading=Label(self.top, text='Create a Client',\n font='arial 18 ')\n self.heading.place(x=260 ,y=60)\n\n self.lablel_fullname = Label(self.bottom, text=\"Utilize the same format as shown in the input fields below:\", font='arial 10')\n self.lablel_fullname.place(x=49, y=-6)\n\n # fullname, sin, dob\n self.lablel_fullname=Label(self.bottom,text=\"Full name, SIN#, DOB:\",font='arial 15')\n self.lablel_fullname.place(x=49,y=19)\n self.entry_fullname=Entry(self.bottom,width=30,bd=4)\n self.entry_fullname.insert(0,\"John Doe, 999999999, 1960-01-01\")\n self.entry_fullname.place(x=265,y=19)\n\n #jobtitle\n self.lablel_JobTittle=Label(self.bottom,text=\"Job title:\",font='arial 15')\n self.lablel_JobTittle.place(x=49,y=49)\n self.entry_JobTittle=Entry(self.bottom,width=30,bd=4)\n self.entry_JobTittle.insert(0,\"Manager\")\n self.entry_JobTittle.place(x=150,y=49)\n #phonenumber\n self.lablel_phonenumber=Label(self.bottom,text=\"Phone number:\",font='arial 15')\n self.lablel_phonenumber.place(x=49,y=79)\n self.entry_phonenumber=Entry(self.bottom,width=30,bd=4)\n self.entry_phonenumber.insert(0,\"6040001111\")\n self.entry_phonenumber.place(x=200,y=79)\n #buisnessphonenumber\n self.lablel_buisnessphonenumber=Label(self.bottom,text=\"Business phone number:\",font='arial 15')\n self.lablel_buisnessphonenumber.place(x=49,y=109)\n self.entry_buisnessphonenumber=Entry(self.bottom,width=30,bd=4)\n self.entry_buisnessphonenumber.insert(0,\"6040002222\")\n self.entry_buisnessphonenumber.place(x=280,y=109)\n #networth\n self.lablel_networth=Label(self.bottom,text=\"Net worth:\",font='arial 15')\n self.lablel_networth.place(x=49,y=139)\n self.entry_networth=Entry(self.bottom,width=30,bd=4)\n self.entry_networth.insert(0,\"100000\")\n self.entry_networth.place(x=150,y=139)\n #assets\n self.lablel_assets=Label(self.bottom,text=\"Assets:\",font='arial 15')\n self.lablel_assets.place(x=49,y=169)\n self.entry_assets=Entry(self.bottom,width=30,bd=4)\n self.entry_assets.insert(0,\"2 cars, 1 house\")\n self.entry_assets.place(x=150,y=169)\n #liabilities\n self.lablel_liabilities=Label(self.bottom,text=\"Liabilities:\",font='arial 15')\n self.lablel_liabilities.place(x=49,y=199)\n self.entry_liabilities=Entry(self.bottom,width=30,bd=4)\n self.entry_liabilities.insert(0,\"Car payments, 2 CCs to pay off\")\n self.entry_liabilities.place(x=150,y=199)\n #expenses\n self.lablel_expenses=Label(self.bottom,text=\"Expenses:\",font='arial 15')\n self.lablel_expenses.place(x=49,y=229)\n self.entry_expenses=Entry(self.bottom,width=30,bd=4)\n self.entry_expenses.insert(0,\"25000\")\n self.entry_expenses.place(x=150,y=229)\n #email\n self.lablel_email=Label(self.bottom,text=\"E-mail:\",font='arial 15')\n self.lablel_email.place(x=49,y=259)\n 
self.entry_email=Entry(self.bottom,width=30,bd=4)\n self.entry_email.insert(0,\"email@johndoe.com\")\n self.entry_email.place(x=150,y=259)\n \n\n\n btnadd=Button(self.bottom,text=\"Add New Client\",font='arial 12 ',width=15,height=2,command=self.add_client)\n btnadd.place(x=270,y=320)\n \n def add_client(self):\n full_name=self.entry_fullname.get()\n job_tittle=self.entry_JobTittle.get()\n\n phone_number=self.entry_phonenumber.get()\n buisness_number=self.entry_buisnessphonenumber.get()\n net_worth=self.entry_networth.get()\n assets=self.entry_assets.get()\n liabilites=self.entry_liabilities.get()\n Expenses=self.entry_expenses.get()\n email=self.entry_email.get()\n\n client_error: bool = False\n\n # check if the client information contains errors\n\n if full_name == \"\" or job_tittle == \"\" or phone_number == \"\" or buisness_number == \"\" or net_worth == \"\" or assets == \"\" or liabilites == \"\" or Expenses == \"\" or email == \"\":\n messagebox.showinfo(\"Error\",\"Please fill all the fields.\")\n client_error = True\n self.destroy()\n\n if isinstance(full_name, str) is False or isinstance(job_tittle, str) is False or phone_number.isnumeric() is False or buisness_number.isnumeric() is False or net_worth.isnumeric() is False or isinstance(assets, str) is False or isinstance(liabilites, str) is False or Expenses.isnumeric() is False or isinstance(email, str) is False:\n messagebox.showinfo(\"Error\",\"One or more of the inputs is not in the correct format (numbers, letters, etc.). Do not use the $ sign for money amounts.\")\n client_error = True\n self.destroy()\n\n # checks if the full name is in the correct format prior to splitting it up into lists for further error checking\n
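# (editor) a single pre-check such as\n # re.fullmatch(r'[^,]+, ?\\d{9}, ?\\d{4}-\\d{2}-\\d{2}', full_name.strip())\n # could replace the manual counting below (hypothetical alternative; requires 'import re').\n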
if full_name.count('-') != 2 or full_name.count(',') != 2 or full_name.count(' ') < 2:\n messagebox.showinfo(\"Error\",\n \"Please enter the names, social security number and date of birth separated by commas and the date of birth in the YYYY-MM-DD format. Use spaces between the sections.\")\n client_error = True\n self.destroy()\n else:\n # holds the name, sin number and dob in a list\n client_list = full_name.split(\",\")\n\n date_of_birth_list = client_list[2].split(\"-\")\n\n # checks the number of numbers in the SIN number, must be a 9 digit number\n if len(client_list[1]) - client_list[1].count(' ') != 9:\n messagebox.showinfo(\"Error\", \"Please enter a SIN number with 9 digits.\")\n client_error = True\n self.destroy()\n \n # checks if the SIN number is numeric\n if client_list[1].strip().isnumeric() == False:\n messagebox.showinfo(\"Error\", \"Please enter a SIN number with only numeric characters.\")\n client_error = True\n self.destroy()\n\n # checks if the date of birth is in the right format\n if client_list[2].count('-') != 2 or len(date_of_birth_list[0]) - date_of_birth_list[0].count(\n ' ') != 4 or len(date_of_birth_list[1]) - date_of_birth_list[1].count(' ') != 2 or len(\n date_of_birth_list[2]) - date_of_birth_list[2].count(' ') != 2:\n messagebox.showinfo(\"Error\", \"Please enter the date of birth in the YYYY-MM-DD format.\")\n client_error = True\n self.destroy()\n\n\n if email.count('@') < 1:\n messagebox.showinfo(\"Error\",\"Please enter an email with a @ symbol.\")\n client_error = True\n self.destroy()\n\n if len(buisness_number) != 10:\n messagebox.showinfo(\"Error\",\"Please enter a 10 digit phone number.\")\n client_error = True\n self.destroy()\n \n if len(phone_number) != 10:\n messagebox.showinfo(\"Error\",\"Please enter a 10 digit phone number.\")\n client_error = True\n self.destroy()\n\n\n # if no errors are present, try to add the client into the db\n if client_error == False:\n try:\n query = \"insert into 'client_info'('full_name','job_tittle','phone_number','buisness_number','net_worth','assets','liabilites','Expenses','email') values(?,?,?,?,?,?,?,?,?)\"\n cur.execute(query, (\n full_name, job_tittle, phone_number, buisness_number, net_worth, assets, liabilites, Expenses, email))\n except Exception:\n messagebox.showerror(\"Error\", \"The client could not be saved to the database.\")\n self.destroy()\n else:\n con.commit()\n messagebox.showinfo(\"Success\", \"Client was added successfully.\")\n self.destroy()\n", "repo_name": "hardec303/finalProject", "sub_path": "create_client.py", "file_name": "create_client.py", "file_ext": "py", "file_size_in_byte": 8555, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "77", "api": [{"api_name": "sqlite3.connect", "line_number": 8, "usage_type": "call"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 109, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 109, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 114, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 114, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 120, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 120, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 132, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 132, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 138, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 138, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 146, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 146, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", 
"line_number": 152, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 152, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 157, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 157, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 162, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 162, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showerror", "line_number": 174, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 174, "usage_type": "name"}, {"api_name": "tkinter.messagebox.showinfo", "line_number": 178, "usage_type": "call"}, {"api_name": "tkinter.messagebox", "line_number": 178, "usage_type": "name"}]} +{"seq_id": "34257012008", "text": "# -*- coding: utf-8 -*-\nfrom odoo import api, models, fields\nfrom odoo.tools.translate import _\nfrom odoo.exceptions import UserError\nimport logging\n_logger = logging.getLogger(__name__)\n\n\nclass AccountJournalSiiDocumentClass(models.Model):\n _name = \"account.journal.dian_document_class\"\n _description = \"Journal DIAN Documents\"\n _order = 'sequence'\n\n @api.depends('dian_document_class_id', 'sequence_id')\n def get_secuence_name(self):\n for r in self:\n sequence_name = (': ' + r.sequence_id.name) if r.sequence_id else ''\n name = (r.dian_document_class_id.name or '') + sequence_name\n r.name = name\n\n name = fields.Char(\n compute=\"get_secuence_name\",\n )\n dian_document_class_id = fields.Many2one(\n 'dian.document_class',\n string='Document Type',\n required=True,\n )\n sequence_id = fields.Many2one(\n 'ir.sequence',\n string='Entry Sequence',\n help=\"\"\"This field contains the information related to the numbering \\\n of the documents entries of this document type.\"\"\",\n )\n journal_id = fields.Many2one(\n 'account.journal',\n string='Journal',\n required=True,\n )\n sequence = fields.Integer(\n string='Sequence',\n )\n\n @api.onchange('dian_document_class_id')\n def check_dian_document_class(self):\n if self.dian_document_class_id and self.sequence_id and self.dian_document_class_id != self.sequence_id.dian_document_class_id:\n raise UserError(\"El tipo de Documento de la secuencia es distinto\")\n\nclass account_journal(models.Model):\n _inherit = \"account.journal\"\n\n journal_document_class_ids = fields.One2many(\n 'account.journal.dian_document_class',\n 'journal_id',\n 'Documents Class',\n )\n use_documents = fields.Boolean(\n string='Use Documents?',\n default='_get_default_doc',\n )\n\n restore_mode = fields.Boolean(\n string=\"Restore Mode\",\n default=False,\n )\n\n @api.onchange('journal_activities_ids')\n def max_actecos(self):\n if len(self.journal_activities_ids) > 4:\n raise UserError(\"Deben Ser máximo 4 actecos por Diario, seleccione los más significativos para este diario\")\n\n @api.multi\n def _get_default_doc(self):\n self.ensure_one()\n if self.type == 'sale' or self.type == 'purchase':\n self.use_documents = True\n\n @api.multi\n def name_get(self):\n res = []\n for journal in self:\n currency = journal.currency_id or journal.company_id.currency_id\n name = \"%s (%s)\" % (journal.name, currency.name)\n #if journal.sucursal_id and self.env.context.get('show_full_name', False):\n # name = \"%s (%s)\" % (name, journal.sucursal_id.name)\n res.append((journal.id, name))\n return res\n", "repo_name": "dansanti/l10n_co_fe", "sub_path": "models/account_journal.py", "file_name": "account_journal.py", "file_ext": "py", "file_size_in_byte": 2974, 
"program_lang": "python", "lang": "en", "doc_type": "code", "stars": 12, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 6, "usage_type": "call"}, {"api_name": "odoo.models.Model", "line_number": 9, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 9, "usage_type": "name"}, {"api_name": "odoo.api.depends", "line_number": 14, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 14, "usage_type": "name"}, {"api_name": "odoo.fields.Char", "line_number": 21, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 21, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 24, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 24, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 29, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 29, "usage_type": "name"}, {"api_name": "odoo.fields.Many2one", "line_number": 35, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 35, "usage_type": "name"}, {"api_name": "odoo.fields.Integer", "line_number": 40, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 40, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 47, "usage_type": "call"}, {"api_name": "odoo.api.onchange", "line_number": 44, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 44, "usage_type": "name"}, {"api_name": "odoo.models.Model", "line_number": 49, "usage_type": "attribute"}, {"api_name": "odoo.models", "line_number": 49, "usage_type": "name"}, {"api_name": "odoo.fields.One2many", "line_number": 52, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 52, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 57, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 57, "usage_type": "name"}, {"api_name": "odoo.fields.Boolean", "line_number": 62, "usage_type": "call"}, {"api_name": "odoo.fields", "line_number": 62, "usage_type": "name"}, {"api_name": "odoo.exceptions.UserError", "line_number": 70, "usage_type": "call"}, {"api_name": "odoo.api.onchange", "line_number": 67, "usage_type": "call"}, {"api_name": "odoo.api", "line_number": 67, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 72, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 72, "usage_type": "name"}, {"api_name": "odoo.api.multi", "line_number": 78, "usage_type": "attribute"}, {"api_name": "odoo.api", "line_number": 78, "usage_type": "name"}]} +{"seq_id": "72862076726", "text": "from django.shortcuts import redirect, render\nfrom .models import *\nfrom django.contrib import messages\nfrom django.core.mail import send_mail\n\ndef home(request):\n\n data = {\n \"services\":Services.objects.all(),\n \"members\": Member.objects.all(),\n \"testimonial\": Testimonial.objects.all(),\n \"address\": Address.objects.all()[:1]\n }\n\n\n return render(request,\"index.html\",data)\n\ndef contact(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\")\n email = request.POST.get(\"email\")\n phone = request.POST.get(\"phone\")\n subject = request.POST.get(\"subject\")\n message = request.POST.get(\"message\")\n Contact.objects.create(\n name = name,\n email = email,\n mobile_number = phone,\n subject = subject,\n message = message\n )\n\n send_mail(f\"Name - {name}, Email - {email}, Phone Number - {phone}, Subject- {subject}\",message,\"urbanspacerealtors.rkl@gmail.com\",[\"hr.rsdastudio@gmail.com\"])\n 
messages.success(request, \"Thank you for contacting us\")\n return redirect(\"home\")\n\n return redirect(\"home\")\n\n\n\ndef testimonialform(request):\n if request.method == \"POST\":\n name = request.POST.get(\"name\")\n prof = request.POST.get(\"prof\")\n prop_img = request.FILES.get(\"prop_img\")\n disc = request.POST.get(\"disc\")\n Testimonial.objects.create(\n name = name,\n prof = prof,\n prop_img = prop_img,\n disc = disc\n )\n messages.success(request, \"Thank you for giving your valuable feedback here\")\n return redirect(\"home\")\n return render(request,\"form.html\")\n\n", "repo_name": "ishwar-jethwani/portfolio", "sub_path": "core/views.py", "file_name": "views.py", "file_ext": "py", "file_size_in_byte": 1731, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.shortcuts.render", "line_number": 16, "usage_type": "call"}, {"api_name": "django.core.mail.send_mail", "line_number": 33, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 34, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 34, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 35, "usage_type": "call"}, {"api_name": "django.shortcuts.redirect", "line_number": 37, "usage_type": "call"}, {"api_name": "django.contrib.messages.success", "line_number": 53, "usage_type": "call"}, {"api_name": "django.contrib.messages", "line_number": 53, "usage_type": "name"}, {"api_name": "django.shortcuts.redirect", "line_number": 54, "usage_type": "call"}, {"api_name": "django.shortcuts.render", "line_number": 55, "usage_type": "call"}]}
+{"seq_id": "22463180524", "text": "from pip._vendor.distlib.compat import raw_input\nimport random\n\n\ndef food_id(food):\n ''' Returns categorization of food\n\n food is a string\n returns a string of categories\n '''\n # The data\n fruits = ['apple', 'banana', 'orange']\n citrus = ['orange']\n starchy = ['banana', 'potato']\n\n # Check the category and report\n if food in fruits:\n if food in citrus:\n return 'Citrus, Fruit'\n else:\n return 'NOT Citrus, Fruit'\n else:\n if food in starchy:\n return 'Starchy, NOT Fruit'\n else:\n return 'NOT Starchy, NOT Fruit'\n\n\ndef food_id_test():\n ''' Unit test for food_id\n returns True if good, returns False and prints error if not good\n '''\n\n works = True\n if food_id('orange') != 'Citrus, Fruit':\n works = 'orange bug in food_id()'\n if food_id('banana') != 'NOT Citrus, Fruit':\n works = 'banana bug in food_id()'\n if food_id('potato') != 'Starchy, NOT Fruit':\n works = 'potato bug in food_id()'\n if food_id('other') != 'NOT Starchy, NOT Fruit':\n works = 'other bug in food_id()'\n # Add tests so that all lines of code are visited during test\n\n if works == True:\n print(\"All good!\")\n return True\n else:\n print(works)\n return False\n\n\ndef f(x):\n if int(x) != x:\n print(\"n is not an integer\")\n elif x % 2 != 0:\n print(\"n is odd\")\n elif x % 3 != 0:\n print(\"n is even\")\n else:\n print(\"n is a multiple of 6\")\n
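\n# (editor) worked examples for f, given the elif chain above:\n# f(7) -> 'n is odd'\n# f(4) -> 'n is even' (even but not divisible by 3)\n# f(12) -> 'n is a multiple of 6' (divisible by both 2 and 3)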
\n\ndef guess_once():\n secret = random.randint(1, 4)\n print('I have a number between 1 and 4.')\n guess = int(raw_input('Guess: '))\n if guess != secret:\n if guess <= secret:\n print('Too low - my number is ', secret, sep='', end='!\\n')\n else:\n print('Too high - my number is ', secret, sep='', end='!\\n')\n else:\n print('Right, my number is', guess, end='!\\n')\n\ndef quiz_decimal(low, high):\n print('Type a number between', low, 'and', high, '.')\n number = float(raw_input('Number: '))\n if number > high:\n print('No,', number, 'is greater than', high)\n elif number < low:\n print('No,', number, 'is less than', low)\n else:\n print('Yes,', low, '<=', number, '<=', high)\n\n\n", "repo_name": "Techzerobytesman/No-Step-On-Snek", "sub_path": "Dillon,Alex_1_3_4A.py", "file_name": "Dillon,Alex_1_3_4A.py", "file_ext": "py", "file_size_in_byte": 2302, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "random.randint", "line_number": 65, "usage_type": "call"}, {"api_name": "pip._vendor.distlib.compat.raw_input", "line_number": 67, "usage_type": "call"}, {"api_name": "pip._vendor.distlib.compat.raw_input", "line_number": 78, "usage_type": "call"}]}
+{"seq_id": "71567561525", "text": "'''\nCreated on Dec 4, 2013\n\n@author: u0490822\n'''\n\nimport numpy as np\nimport scipy.misc as misc\nfrom PIL import Image, TiffTags\n\ndef TiledTifSave():\n '''Allocate an image with dimensions larger than 2^31 and save it as a tiled tif'''\n\n # for i in range(14, 17):\n # dim = 1 << i\n # dim -= 8\n\n print(Image.PILLOW_VERSION)\n\n # xdim = 48000\n # ydim = 32769\n xdim = 1024\n ydim = 1024\n dtypestr = \"uint8\"\n dtype = np.uint8\n\n a = np.ones((xdim, ydim), dtype=dtype)\n for iRow in range(0, ydim):\n a[iRow, :] = iRow % 256\n\n outputname = '%dx%d_%s_test.tif' % (xdim, ydim, dtypestr)\n\n img = Image.fromarray(a, 'L')\n tiff_info = CreateTiledTiffInfo()\n img.save(outputname, tiffinfo=tiff_info)\n\n print(\"All done!\")\n\ndef TagNameToTagNumber():\n NameToNumber = {}\n for k, v in TiffTags.TAGS.items():\n NameToNumber[v] = k\n\n return NameToNumber\n\ndef CreateTiledTiffInfo(img=None):\n '''Create the tiff_info dictionary required to prompt Pillow to write a tiled tiff file'''\n\n Tags = TagNameToTagNumber()\n\n tiff_info = {}\n tiff_info[Tags[\"TileWidth\"]] = 256\n tiff_info[Tags[\"TileLength\"]] = 256\n\n return tiff_info\n\n\nif __name__ == '__main__':\n TiledTifSave()\n pass", "repo_name": "jamesra/random", "sub_path": "pillow_tif_tiles.py", "file_name": "pillow_tif_tiles.py", "file_ext": "py", "file_size_in_byte": 1253, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "PIL.Image.PILLOW_VERSION", "line_number": 18, "usage_type": "attribute"}, {"api_name": "PIL.Image", "line_number": 18, "usage_type": "name"}, {"api_name": "numpy.uint8", "line_number": 25, "usage_type": "attribute"}, {"api_name": "numpy.ones", "line_number": 27, "usage_type": "call"}, {"api_name": "PIL.Image.fromarray", "line_number": 33, "usage_type": "call"}, {"api_name": "PIL.Image", "line_number": 33, "usage_type": "name"}, {"api_name": "PIL.TiffTags.TAGS.items", "line_number": 41, "usage_type": "call"}, {"api_name": "PIL.TiffTags.TAGS", "line_number": 41, "usage_type": "attribute"}, {"api_name": "PIL.TiffTags", "line_number": 41, "usage_type": "name"}]}
+{"seq_id": "28594936060", "text": "\"\"\"Fix to introduce on delete cascade\n\nRevision ID: ef82c4359bb4\nRevises: 3d2d24f3da61\nCreate Date: 2020-08-28 22:14:09.294268\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef82c4359bb4'\ndown_revision = '3d2d24f3da61'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(\n 'data_format_files_data_type_id_fkey', 'data_format_files', type_='foreignkey'\n )\n op.create_foreign_key(\n None,\n 'data_format_files',\n 'data_types',\n ['data_type_id'],\n ['id'],\n ondelete='CASCADE',\n )\n op.drop_constraint('data_types_project_id_fkey', 'data_types', type_='foreignkey')\n op.create_foreign_key(\n None, 'data_types', 'projects', ['project_id'], ['id'], ondelete='CASCADE'\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'data_types', type_='foreignkey')\n op.create_foreign_key(\n 'data_types_project_id_fkey', 'data_types', 'projects', ['project_id'], ['id']\n )\n op.drop_constraint(None, 'data_format_files', type_='foreignkey')\n op.create_foreign_key(\n 'data_format_files_data_type_id_fkey',\n 'data_format_files',\n 'data_types',\n ['data_type_id'],\n ['id'],\n )\n # ### end Alembic commands ###\n", "repo_name": "ikennaokpala/flask-restful-api-example", "sub_path": "src/main/config/db/migrations/versions/ef82c4359bb4_.py", "file_name": "ef82c4359bb4_.py", "file_ext": "py", "file_size_in_byte": 1470, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "alembic.op.drop_constraint", "line_number": 21, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 21, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 24, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 24, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 32, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 32, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 33, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 33, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 41, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 41, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 42, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 42, "usage_type": "name"}, {"api_name": "alembic.op.drop_constraint", "line_number": 45, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 45, "usage_type": "name"}, {"api_name": "alembic.op.create_foreign_key", "line_number": 46, "usage_type": "call"}, {"api_name": "alembic.op", "line_number": 46, "usage_type": "name"}]} +{"seq_id": "6612915112", "text": "from sqlalchemy.exc import IntegrityError\n\nfrom tests.base import BaseTestCase\nfrom tests.test_models.test_user_model import create_user\n\nfrom app import db\nfrom models.deck import Deck\nfrom models.user import User\n\n\ndef create_deck(owner, **overrides):\n params = {\n \"name\": \"My Awesome Deck\",\n \"owner\": owner,\n }\n params.update(overrides)\n\n deck = Deck(**params)\n\n db.session.add(deck)\n db.session.commit()\n\n return deck\n\n\nclass TestDeckModel(BaseTestCase):\n def setUp(self):\n super(TestDeckModel, self).setUp()\n\n self.test_user = create_user()\n\n def test_raises_integrity_error_if_name_is_null(self):\n self.assertRaises(IntegrityError, create_deck, owner=self.test_user, name=None)\n\n def test_raises_error_if_owner_is_invalid(self):\n self.assertRaises(IntegrityError, create_deck, owner=User())\n\n def test_creates_deck(self):\n deck = create_deck(owner=self.test_user)\n\n deck_in_db = Deck.query.one()\n\n self.assertEqual(deck, deck_in_db)\n 
self.assertEqual(self.test_user, deck_in_db.owner)\n self.assertEqual([deck], self.test_user.decks)\n", "repo_name": "ravipunj/flashy", "sub_path": "tests/test_models/test_deck_model.py", "file_name": "test_deck_model.py", "file_ext": "py", "file_size_in_byte": 1149, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.deck.Deck", "line_number": 18, "usage_type": "call"}, {"api_name": "app.db.session.add", "line_number": 20, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 20, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 20, "usage_type": "name"}, {"api_name": "app.db.session.commit", "line_number": 21, "usage_type": "call"}, {"api_name": "app.db.session", "line_number": 21, "usage_type": "attribute"}, {"api_name": "app.db", "line_number": 21, "usage_type": "name"}, {"api_name": "tests.base.BaseTestCase", "line_number": 26, "usage_type": "name"}, {"api_name": "tests.test_models.test_user_model.create_user", "line_number": 30, "usage_type": "call"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 33, "usage_type": "argument"}, {"api_name": "sqlalchemy.exc.IntegrityError", "line_number": 36, "usage_type": "argument"}, {"api_name": "models.user.User", "line_number": 36, "usage_type": "call"}, {"api_name": "models.deck.Deck.query.one", "line_number": 41, "usage_type": "call"}, {"api_name": "models.deck.Deck.query", "line_number": 41, "usage_type": "attribute"}, {"api_name": "models.deck.Deck", "line_number": 41, "usage_type": "name"}]} +{"seq_id": "26895458478", "text": "from django.urls import path\nfrom Firstapp import views\n\nurlpatterns=[\n #Welcome admin Page\n path('admin_welcome/',views.welcome_admin),\n\n #IT JOBS\n path('dashboard/',views.dashboard),\n path('it_show/',views.it_show),\n path('it_add/',views.it_add),\n path('it_update//',views.it_update),\n path('it_delete//',views.it_delete),\n\n #MECHANICAL JOBS\n path('mech_show/',views.mech_show),\n path('mech_add/',views.mech_add),\n path('mech_update//',views.mech_update),\n path('mech_delete//',views.mech_delete),\n\n #CIVIL JOBS\n path('civil_show/',views.civil_show),\n path('civil_add/',views.civil_add),\n path('civil_update//',views.civil_update),\n path('civil_delete//',views.civil_delete),\n\n #show resume\n path('show_resume//',views.showresume),\n path('mech_show_resume//',views.mech_showresume),\n path('civil_show_resume//',views.civil_showresume)\n\n\n\n]", "repo_name": "Ruchika-Munde/Django_Task", "sub_path": "Dashboard/dashboard/Firstapp/urls.py", "file_name": "urls.py", "file_ext": "py", "file_size_in_byte": 979, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "django.urls.path", "line_number": 6, "usage_type": "call"}, {"api_name": "Firstapp.views.welcome_admin", "line_number": 6, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 6, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 9, "usage_type": "call"}, {"api_name": "Firstapp.views.dashboard", "line_number": 9, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 9, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 10, "usage_type": "call"}, {"api_name": "Firstapp.views.it_show", "line_number": 10, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 10, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 11, 
"usage_type": "call"}, {"api_name": "Firstapp.views.it_add", "line_number": 11, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 11, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 12, "usage_type": "call"}, {"api_name": "Firstapp.views.it_update", "line_number": 12, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 12, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 13, "usage_type": "call"}, {"api_name": "Firstapp.views.it_delete", "line_number": 13, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 13, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 16, "usage_type": "call"}, {"api_name": "Firstapp.views.mech_show", "line_number": 16, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 16, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 17, "usage_type": "call"}, {"api_name": "Firstapp.views.mech_add", "line_number": 17, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 17, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 18, "usage_type": "call"}, {"api_name": "Firstapp.views.mech_update", "line_number": 18, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 18, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 19, "usage_type": "call"}, {"api_name": "Firstapp.views.mech_delete", "line_number": 19, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 19, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 22, "usage_type": "call"}, {"api_name": "Firstapp.views.civil_show", "line_number": 22, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 22, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 23, "usage_type": "call"}, {"api_name": "Firstapp.views.civil_add", "line_number": 23, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 23, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 24, "usage_type": "call"}, {"api_name": "Firstapp.views.civil_update", "line_number": 24, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 24, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 25, "usage_type": "call"}, {"api_name": "Firstapp.views.civil_delete", "line_number": 25, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 25, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 28, "usage_type": "call"}, {"api_name": "Firstapp.views.showresume", "line_number": 28, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 28, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 29, "usage_type": "call"}, {"api_name": "Firstapp.views.mech_showresume", "line_number": 29, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 29, "usage_type": "name"}, {"api_name": "django.urls.path", "line_number": 30, "usage_type": "call"}, {"api_name": "Firstapp.views.civil_showresume", "line_number": 30, "usage_type": "attribute"}, {"api_name": "Firstapp.views", "line_number": 30, "usage_type": "name"}]} +{"seq_id": "38773721213", "text": "from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter\nfrom src.gat2vec import gat2vec\nfrom Evaluation.Classification import Classification\n\n\ndef main():\n parser = ArgumentParser(\"gat2vec\",\n 
formatter_class=ArgumentDefaultsHelpFormatter,\n conflict_handler='resolve')\n\n parser.add_argument('--data', nargs='?', required=True,\n help='Input data directory in ../data/ folder')\n\n parser.add_argument('--label', nargs='?', default=False, type=bool,\n help=' If data is labelled')\n\n parser.add_argument('--num-walks', default=10, type=int,\n help='Random walks per node')\n\n parser.add_argument('--walk-length', default=80, type=int,\n help='Random walk length')\n\n parser.add_argument('--output', default=True,\n help='save output embedding')\n\n parser.add_argument('--dimension', default=128, type=int,\n help='size of representation.')\n\n parser.add_argument('--window-size', default=5, type=int,\n help='Window size of skipgram model.')\n return parser.parse_args()\n\nif __name__ == \"__main__\":\n args = main()\n g2v = gat2vec(args.data)\n model = g2v.train_gat2vec(args.data, args.label, args.num_walks, args.walk_length, args.dimension,\n args.window_size, args.output)\n\n ''' for blogcatalog set multilabel = True'''\n c_eval = Classification(args.data, multilabel=False)\n c_eval.evaluate(model, args.label)", "repo_name": "afcarl/REFLAG", "sub_path": "__main__.py", "file_name": "__main__.py", "file_ext": "py", "file_size_in_byte": 1594, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "argparse.ArgumentParser", "line_number": 7, "usage_type": "call"}, {"api_name": "argparse.ArgumentDefaultsHelpFormatter", "line_number": 8, "usage_type": "name"}, {"api_name": "src.gat2vec.gat2vec", "line_number": 35, "usage_type": "call"}, {"api_name": "Evaluation.Classification.Classification", "line_number": 40, "usage_type": "call"}]} +{"seq_id": "25268775131", "text": "# Function for transformation of dataframes for hotspots by subregion\nimport requests\nimport pandas as pd\nimport time\n\nimport os\nfrom dotenv import load_dotenv\nload_dotenv()\napi_key = os.environ.get('EBIRD_API_KEY')\n\ndef fetch_hotspot_data(hotspots):\n hotspot_data_list = []\n \n for hotspot in hotspots:\n hotspots_url = 'https://api.ebird.org/v2/ref/hotspot/' + hotspot + '?fmt=json'\n try:\n response = requests.get(hotspots_url, params=api_key)\n response.raise_for_status() # Raise an exception for bad status codes\n\n hotspots_data = response.json()\n hotspots_df = pd.DataFrame(hotspots_data)\n hotspot_data_list.append(hotspots_df)\n\n time.sleep(1) # Add a 1-second delay to avoid rate limits\n except requests.exceptions.RequestException as e:\n print(f\"An error occurred for hotspot {hotspot}: {e}\")\n print(response.content)\n\n # Concatenate all DataFrames in the list into a single DataFrame\n all_hotspots_df = pd.concat(hotspot_data_list, ignore_index=True)\n\n return all_hotspots_df", "repo_name": "nicole440/dryocopus-pileatus", "sub_path": "get_local_hotspots.py", "file_name": "get_local_hotspots.py", "file_ext": "py", "file_size_in_byte": 1110, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "dotenv.load_dotenv", "line_number": 8, "usage_type": "call"}, {"api_name": "os.environ.get", "line_number": 9, "usage_type": "call"}, {"api_name": "os.environ", "line_number": 9, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 17, "usage_type": "call"}, {"api_name": "pandas.DataFrame", "line_number": 21, "usage_type": "call"}, {"api_name": "time.sleep", "line_number": 24, "usage_type": "call"}, {"api_name": "requests.exceptions", 
"line_number": 25, "usage_type": "attribute"}, {"api_name": "pandas.concat", "line_number": 30, "usage_type": "call"}]} +{"seq_id": "17415051269", "text": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function, absolute_import\nfrom sqlalchemy.ext.orderinglist import ordering_list\n\nfrom .. import db\nfrom ..models import declarative_base\nfrom ..resource import (\n Resource,\n Scope,\n Permission,\n ResourceScope,\n Serializer,\n SerializedProperty as SP,\n SerializedResourceRelationship as SRR,\n ResourceGroup)\n\nfrom .util import _\n\nBase = declarative_base()\n\n\nclass WebMapScope(Scope):\n identity = 'webmap'\n label = _(\"Web map\")\n\n display = Permission(_(\"Display\"))\n\n\nclass WebMap(Base, Resource):\n identity = 'webmap'\n cls_display_name = _(\"Web map\")\n\n __scope__ = WebMapScope\n\n root_item_id = db.Column(db.ForeignKey('webmap_item.id'), nullable=False)\n bookmark_resource_id = db.Column(db.ForeignKey(Resource.id), nullable=True)\n draw_order_enabled = db.Column(db.Boolean, nullable=True)\n editable = db.Column(db.Boolean, nullable=False, default=False)\n\n extent_left = db.Column(db.Float, default=-180)\n extent_right = db.Column(db.Float, default=+180)\n extent_bottom = db.Column(db.Float, default=-90)\n extent_top = db.Column(db.Float, default=+90)\n\n bookmark_resource = db.relationship(\n Resource, foreign_keys=bookmark_resource_id,\n backref=db.backref('bookmarked_webmaps'))\n\n root_item = db.relationship('WebMapItem', cascade='all')\n\n @classmethod\n def check_parent(cls, parent):\n return isinstance(parent, ResourceGroup)\n\n def to_dict(self):\n return dict(\n id=self.id,\n display_name=self.display_name,\n editable=self.editable,\n root_item=self.root_item.to_dict(),\n bookmark_resource_id=self.bookmark_resource_id,\n extent=(self.extent_left, self.extent_bottom,\n self.extent_right, self.extent_top),\n )\n\n def from_dict(self, data):\n if 'display_name' in data:\n self.display_name = data['display_name']\n\n if 'root_item' in data:\n self.root_item = WebMapItem(item_type='root')\n self.root_item.from_dict(data['root_item'])\n\n if 'bookmark_resource_id' in data:\n self.bookmark_resource_id = data['bookmark_resource_id']\n\n if 'extent' in data:\n self.extent_left, self.extent_bottom, \\\n self.extent_right, self.extent_top = data['extent']\n\n if 'editable' in data:\n self.editable = data['editable']\n\n\nclass WebMapItem(Base):\n __tablename__ = 'webmap_item'\n\n id = db.Column(db.Integer, primary_key=True)\n parent_id = db.Column(db.Integer, db.ForeignKey('webmap_item.id'))\n item_type = db.Column(db.Enum('root', 'group', 'layer'), nullable=False)\n position = db.Column(db.Integer, nullable=True)\n display_name = db.Column(db.Unicode, nullable=True)\n group_expanded = db.Column(db.Boolean, nullable=True)\n layer_style_id = db.Column(db.ForeignKey(Resource.id), nullable=True)\n layer_enabled = db.Column(db.Boolean, nullable=True)\n layer_transparency = db.Column(db.Float, nullable=True)\n layer_min_scale_denom = db.Column(db.Float, nullable=True)\n layer_max_scale_denom = db.Column(db.Float, nullable=True)\n layer_adapter = db.Column(db.Unicode, nullable=True)\n draw_order_position = db.Column(db.Integer, nullable=True)\n\n parent = db.relationship(\n 'WebMapItem', remote_side=id, backref=db.backref(\n 'children', order_by=position, cascade='all, delete-orphan',\n collection_class=ordering_list('position')))\n\n style = db.relationship(\n 'Resource',\n # Temporary solution that allows to automatically\n # remove web-map 
elements when style is removed\n        backref=db.backref('webmap_items', cascade='all')\n    )\n\n    def to_dict(self):\n        if self.item_type in ('root', 'group'):\n            children = list(self.children)\n            children.sort(key=lambda c: c.position)\n\n            if self.item_type == 'root':\n                return dict(\n                    item_type=self.item_type,\n                    children=[i.to_dict() for i in children],\n                )\n\n            elif self.item_type == 'group':\n                return dict(\n                    item_type=self.item_type,\n                    display_name=self.display_name,\n                    group_expanded=self.group_expanded,\n                    children=[i.to_dict() for i in children],\n                )\n\n        elif self.item_type == 'layer':\n            return dict(\n                item_type=self.item_type,\n                display_name=self.display_name,\n                layer_enabled=self.layer_enabled,\n                layer_transparency=self.layer_transparency,\n                layer_style_id=self.layer_style_id,\n                layer_min_scale_denom=self.layer_min_scale_denom,\n                layer_max_scale_denom=self.layer_max_scale_denom,\n                layer_adapter=self.layer_adapter,\n                draw_order_position=self.draw_order_position,\n            )\n\n    def from_dict(self, data):\n        assert data['item_type'] == self.item_type\n        if data['item_type'] in ('root', 'group') and 'children' in data:\n            self.children = []\n            for i in data['children']:\n                child = WebMapItem(parent=self, item_type=i['item_type'])\n                child.from_dict(i)\n                self.children.append(child)\n\n        for a in ('display_name', 'group_expanded', 'layer_enabled',\n                  'layer_adapter', 'layer_style_id', 'layer_transparency',\n                  'layer_min_scale_denom', 'layer_max_scale_denom',\n                  'draw_order_position'):\n\n            if a in data:\n                setattr(self, a, data[a])\n\n\nPR_READ = ResourceScope.read\nPR_UPDATE = ResourceScope.update\n\n_mdargs = dict(read=PR_READ, write=PR_UPDATE)\n\n\nclass _root_item_attr(SP):\n\n    def getter(self, srlzr):\n        return srlzr.obj.root_item.to_dict()\n\n    def setter(self, srlzr, value):\n        if srlzr.obj.root_item is None:\n            srlzr.obj.root_item = WebMapItem(item_type='root')\n\n        srlzr.obj.root_item.from_dict(value)\n\n\nclass WebMapSerializer(Serializer):\n    identity = WebMap.identity\n    resclass = WebMap\n\n    extent_left = SP(**_mdargs)\n    extent_right = SP(**_mdargs)\n    extent_bottom = SP(**_mdargs)\n    extent_top = SP(**_mdargs)\n\n    draw_order_enabled = SP(**_mdargs)\n    editable = SP(**_mdargs)\n\n    bookmark_resource = SRR(**_mdargs)\n\n    root_item = _root_item_attr(**_mdargs)\n", "repo_name": "annamarieroja/annamarieroja.github.io", "sub_path": "nextgisweb/webmap/model.py", "file_name": "model.py", "file_ext": "py", "file_size_in_byte": 6518, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "models.declarative_base", "line_number": 19, "usage_type": "call"}, {"api_name": "resource.Scope", "line_number": 22, "usage_type": "name"}, {"api_name": "util._", "line_number": 24, "usage_type": "call"}, {"api_name": "resource.Permission", "line_number": 26, "usage_type": "call"}, {"api_name": "util._", "line_number": 26, "usage_type": "call"}, {"api_name": "resource.Resource", "line_number": 29, "usage_type": "name"}, {"api_name": "util._", "line_number": 31, "usage_type": "call"}, {"api_name": "resource.Resource.id", "line_number": 36, "usage_type": "attribute"}, {"api_name": "resource.Resource", "line_number": 36, "usage_type": "name"}, {"api_name": "resource.Resource", "line_number": 46, "usage_type": "argument"}, {"api_name": "resource.ResourceGroup", "line_number": 53, "usage_type": "argument"}, {"api_name": "resource.Resource.id", "line_number": 94, "usage_type": "attribute"}, {"api_name": "resource.Resource", "line_number": 94, "usage_type": "name"}, 
{"api_name": "sqlalchemy.ext.orderinglist.ordering_list", "line_number": 105, "usage_type": "call"}, {"api_name": "resource.ResourceScope.read", "line_number": 164, "usage_type": "attribute"}, {"api_name": "resource.ResourceScope", "line_number": 164, "usage_type": "name"}, {"api_name": "resource.ResourceScope.update", "line_number": 165, "usage_type": "attribute"}, {"api_name": "resource.ResourceScope", "line_number": 165, "usage_type": "name"}, {"api_name": "resource.SerializedProperty", "line_number": 170, "usage_type": "name"}, {"api_name": "resource.Serializer", "line_number": 182, "usage_type": "name"}, {"api_name": "resource.SerializedProperty", "line_number": 186, "usage_type": "call"}, {"api_name": "resource.SerializedProperty", "line_number": 187, "usage_type": "call"}, {"api_name": "resource.SerializedProperty", "line_number": 188, "usage_type": "call"}, {"api_name": "resource.SerializedProperty", "line_number": 189, "usage_type": "call"}, {"api_name": "resource.SerializedProperty", "line_number": 191, "usage_type": "call"}, {"api_name": "resource.SerializedProperty", "line_number": 192, "usage_type": "call"}, {"api_name": "resource.SerializedResourceRelationship", "line_number": 194, "usage_type": "call"}]} +{"seq_id": "7107331589", "text": "import hashlib\nfrom datetime import datetime\n\nfrom fastapi.encoders import jsonable_encoder\nfrom sqlalchemy.orm import Session\n\nfrom app.crud.base import CRUDBase\nfrom app.models.msg import Msg\nfrom app.schemas.msg import MsgCreate, MsgUpdate\n\n\nclass CRUDMsg(CRUDBase[Msg, MsgCreate, MsgUpdate]):\n def create(\n self, db: Session, *, obj_in: MsgCreate, interaction_id: str\n ) -> Msg:\n obj_in_data = jsonable_encoder(obj_in)\n db_obj = self.model(**obj_in_data, interaction_id=interaction_id)\n db_obj.created_at = datetime.now()\n db_obj.updated_at = datetime.now()\n db_obj.id = hashlib.sha256(\n (\n f\"{db_obj.id}{db_obj.content}{db_obj.role}\"\n + f\"{db_obj.interaction_id}\"\n + f\"{db_obj.created_at.strftime('%Y-%m-%d %H:%M:%S')}\"\n + f\"{db_obj.updated_at.strftime('%Y-%m-%d %H:%M:%S')}\"\n ).encode()\n ).hexdigest()\n db.add(db_obj)\n db.commit()\n db.refresh(db_obj)\n return db_obj\n\n def get_multi(\n self,\n db: Session,\n *,\n interaction_id: int,\n skip: int = 0,\n limit: int = 100,\n ) -> list[Msg]:\n return (\n db.query(self.model)\n .filter(Msg.interaction_id == interaction_id)\n .offset(skip)\n .limit(limit)\n .all()\n )\n\n\nmsg = CRUDMsg(Msg)\n", "repo_name": "smb-h/mock-ai-chat", "sub_path": "backend/app/app/crud/crud_msg.py", "file_name": "crud_msg.py", "file_ext": "py", "file_size_in_byte": 1411, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "app.crud.base.CRUDBase", "line_number": 12, "usage_type": "name"}, {"api_name": "app.models.msg.Msg", "line_number": 12, "usage_type": "name"}, {"api_name": "app.schemas.msg.MsgCreate", "line_number": 12, "usage_type": "name"}, {"api_name": "app.schemas.msg.MsgUpdate", "line_number": 12, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 14, "usage_type": "name"}, {"api_name": "app.schemas.msg.MsgCreate", "line_number": 14, "usage_type": "name"}, {"api_name": "fastapi.encoders.jsonable_encoder", "line_number": 16, "usage_type": "call"}, {"api_name": "datetime.datetime.now", "line_number": 18, "usage_type": "call"}, {"api_name": "datetime.datetime", "line_number": 18, "usage_type": "name"}, {"api_name": "datetime.datetime.now", "line_number": 19, "usage_type": 
"call"}, {"api_name": "datetime.datetime", "line_number": 19, "usage_type": "name"}, {"api_name": "hashlib.sha256", "line_number": 20, "usage_type": "call"}, {"api_name": "app.models.msg.Msg", "line_number": 15, "usage_type": "name"}, {"api_name": "sqlalchemy.orm.Session", "line_number": 35, "usage_type": "name"}, {"api_name": "app.models.msg.Msg.interaction_id", "line_number": 43, "usage_type": "attribute"}, {"api_name": "app.models.msg.Msg", "line_number": 43, "usage_type": "name"}, {"api_name": "app.models.msg.Msg", "line_number": 40, "usage_type": "name"}, {"api_name": "app.models.msg.Msg", "line_number": 50, "usage_type": "argument"}]} +{"seq_id": "6169882868", "text": "#!/usr/bin/python2\n\n__version__ = \"$Id:$\"\n__docformat__ = \"reStructuredText\"\n\nimport sys,math\n\nsys.dont_write_bytecode = 1\n\nimport pygame\nfrom pygame.locals import *\nfrom pygame.color import *\n\nimport pymunk\nfrom pymunk.vec2d import Vec2d\nfrom pymunk.pygame_util import draw, from_pygame, to_pygame\n\nfrom scene import *\nfrom entities import *\nfrom actor import *\nfrom damage import *\n\n# --------------------------------------------------------\n\nwidth, height = 700,400\nfps = 60\ndt = 1./fps\n\n# --------------------------------------------------------\n\ndef main():\n ### PyGame init\n pygame.init()\n screen = pygame.display.set_mode((width,height))\n\n clock = pygame.time.Clock()\n running = True\n font = pygame.font.SysFont(\"Arial\", 16)\n\n ### Physics stuff\n space = pymunk.Space()\n space.gravity = 0,-1000\n # box walls\n static = [pymunk.Segment(space.static_body, (10, 50), (690, 50), 5)\n , pymunk.Segment(space.static_body, (690, 50), (690, 370), 5)\n , pymunk.Segment(space.static_body, (690, 370), (10, 370), 5)\n , pymunk.Segment(space.static_body, (10, 370), (10, 50), 5)\n ]\n\n for s in static:\n s.friction = 1.\n s.group = 1\n s.elasticity = 0.9\n\n space.add(static)\n space.elasticIterations = 10\n\n scene = SceneGraph(screen, space)\n\n body = Actor()\n body.position = 100,100\n\n ball = Ball()\n ball.position = 150,90\n\n scene.register(ball)\n scene.register(body)\n\n\n while running:\n screen.fill(pygame.color.THECOLORS[\"black\"])\n\n scene.update(dt)\n\n for event in pygame.event.get():\n if event.type == QUIT or \\\n event.type == KEYDOWN and (event.key in [K_ESCAPE, K_q]):\n running = False\n else:\n body.controller.onEvent(event)\n\n scene.draw(screen)\n screen.blit(font.render(\"fps: \" + str(clock.get_fps()), 1, THECOLORS[\"white\"]), (0,0))\n screen.blit(font.render(\"%d%%\" % ball.damage, 1, THECOLORS[\"white\"]), (600,0))\n pygame.display.flip()\n\n space.step(dt)\n clock.tick(fps)\n\n\nif __name__ == '__main__':\n sys.exit(main())\n", "repo_name": "isovector/smashball", "sub_path": "smashball.py", "file_name": "smashball.py", "file_ext": "py", "file_size_in_byte": 2189, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.dont_write_bytecode", "line_number": 8, "usage_type": "attribute"}, {"api_name": "pygame.init", "line_number": 33, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 34, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 34, "usage_type": "attribute"}, {"api_name": "pygame.time.Clock", "line_number": 36, "usage_type": "call"}, {"api_name": "pygame.time", "line_number": 36, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 38, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 38, 
"usage_type": "attribute"}, {"api_name": "pymunk.Space", "line_number": 41, "usage_type": "call"}, {"api_name": "pymunk.Segment", "line_number": 44, "usage_type": "call"}, {"api_name": "pymunk.Segment", "line_number": 45, "usage_type": "call"}, {"api_name": "pymunk.Segment", "line_number": 46, "usage_type": "call"}, {"api_name": "pymunk.Segment", "line_number": 47, "usage_type": "call"}, {"api_name": "scene.register", "line_number": 66, "usage_type": "call"}, {"api_name": "scene.register", "line_number": 67, "usage_type": "call"}, {"api_name": "pygame.color", "line_number": 71, "usage_type": "attribute"}, {"api_name": "scene.update", "line_number": 73, "usage_type": "call"}, {"api_name": "pygame.event.get", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 75, "usage_type": "attribute"}, {"api_name": "scene.draw", "line_number": 82, "usage_type": "call"}, {"api_name": "pygame.display.flip", "line_number": 85, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 85, "usage_type": "attribute"}, {"api_name": "sys.exit", "line_number": 92, "usage_type": "call"}]} +{"seq_id": "42368559129", "text": "import discord as d\nfrom discord.ext import commands\nfrom bot.constants import EMOJI_SUCCESS, EMOJI_WARNING, PRIMARY_COLOR\n\n\nclass Duelist(commands.Cog):\n def __init__(self, bot) -> None:\n self.bot = bot\n\n @commands.slash_command()\n @d.guild_only()\n async def duelist_create_role(self, ctx: d.ApplicationContext):\n \"\"\"Creates duelist role if it does not alreay exist\n\n Handled Cases:\n - Does not have permission to manage roles\n - Role already exists\n \"\"\"\n embed = d.Embed(color=PRIMARY_COLOR)\n if not ctx.user.guild_permissions.manage_roles: # type: ignore\n embed.description = f\"{EMOJI_WARNING} You don't have the permission to manage roles. Please contact with the admin\"\n await ctx.respond(embed=embed, ephemeral=True)\n return\n for role in ctx.guild.roles: # type: ignore\n if role.name == \"duelist\":\n embed.description = f\"{EMOJI_WARNING} {role.mention} already exists\"\n await ctx.respond(embed=embed, ephemeral=True)\n return\n role = await ctx.guild.create_role(name=\"duelist\") # type: ignore\n embed.description = f\"{EMOJI_SUCCESS} Created {role.mention} role\"\n await ctx.respond(embed=embed, ephemeral=True)\n\n @commands.slash_command()\n @d.guild_only()\n async def duelist_get_role(self, ctx: d.ApplicationContext):\n \"\"\"Get notified for open duels\n\n Handled Cases:\n - Already have duelist role\n - Role does not exist in guild\n \"\"\"\n embed = d.Embed(color=PRIMARY_COLOR)\n for role in ctx.user.roles: # type: ignore\n if role.name == \"duelist\":\n embed.description = (\n f\"{EMOJI_WARNING} You already have {role.mention} role\"\n )\n await ctx.respond(embed=embed, ephemeral=True)\n return\n for role in ctx.guild.roles: # type: ignore\n if role.name == \"duelist\":\n await ctx.user.add_roles(role) # type: ignore\n embed.description = f\"{EMOJI_SUCCESS} Added {role.mention} role\"\n await ctx.respond(embed=embed, ephemeral=True)\n return\n embed.description = f\"{EMOJI_WARNING} This server does not have `duelist` role. 
Please use /duelist_create_role to create the role\"\n await ctx.respond(embed=embed, ephemeral=True)\n\n @commands.slash_command()\n @d.guild_only()\n async def duelist_remove_role(self, ctx: d.ApplicationContext):\n \"\"\"Remove duelist role from user\n\n Handled Cases:\n - Role does not exist in guild\n - User does not have duelist role\n \"\"\"\n embed = d.Embed(color=PRIMARY_COLOR)\n for role in ctx.guild.roles: # type: ignore\n if role.name == \"duelist\":\n for user_role in ctx.user.roles: # type: ignore\n if user_role.name == \"duelist\":\n await ctx.user.remove_roles(user_role) # type: ignore\n embed.description = (\n f\"{EMOJI_SUCCESS} Removed {role.mention} role\"\n )\n await ctx.respond(embed=embed, ephemeral=True)\n return\n embed.description = (\n f\"{EMOJI_WARNING} You don't have {role.mention} role\"\n )\n await ctx.respond(embed=embed, ephemeral=True)\n return\n embed.description = f\"{EMOJI_WARNING} This server does not have `duelist` role. Please use /duelist_create_role to create the role\"\n await ctx.respond(embed=embed, ephemeral=True)\n\n\ndef setup(bot):\n bot.add_cog(Duelist(bot))\n", "repo_name": "roundspecs/khelile-ayyun", "sub_path": "bot/cogs/duelist.py", "file_name": "duelist.py", "file_ext": "py", "file_size_in_byte": 3756, "program_lang": "python", "lang": "en", "doc_type": "code", "dataset": "github-code", "pt": "76", "api": [{"api_name": "discord.ext.commands.Cog", "line_number": 6, "usage_type": "attribute"}, {"api_name": "discord.ext.commands", "line_number": 6, "usage_type": "name"}, {"api_name": "bot.constants", "line_number": 8, "usage_type": "name"}, {"api_name": "discord.ApplicationContext", "line_number": 12, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 19, "usage_type": "call"}, {"api_name": "bot.constants.PRIMARY_COLOR", "line_number": 19, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_WARNING", "line_number": 21, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_WARNING", "line_number": 26, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_SUCCESS", "line_number": 30, "usage_type": "name"}, {"api_name": "discord.ext.commands.slash_command", "line_number": 10, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 10, "usage_type": "name"}, {"api_name": "discord.guild_only", "line_number": 11, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 35, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 42, "usage_type": "call"}, {"api_name": "bot.constants.PRIMARY_COLOR", "line_number": 42, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_WARNING", "line_number": 46, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_SUCCESS", "line_number": 53, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_WARNING", "line_number": 56, "usage_type": "name"}, {"api_name": "discord.ext.commands.slash_command", "line_number": 33, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 33, "usage_type": "name"}, {"api_name": "discord.guild_only", "line_number": 34, "usage_type": "call"}, {"api_name": "discord.ApplicationContext", "line_number": 61, "usage_type": "attribute"}, {"api_name": "discord.Embed", "line_number": 68, "usage_type": "call"}, {"api_name": "bot.constants.PRIMARY_COLOR", "line_number": 68, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_SUCCESS", "line_number": 75, "usage_type": "name"}, {"api_name": "bot.constants.EMOJI_WARNING", "line_number": 80, "usage_type": "name"}, 
{"api_name": "bot.constants.EMOJI_WARNING", "line_number": 84, "usage_type": "name"}, {"api_name": "discord.ext.commands.slash_command", "line_number": 59, "usage_type": "call"}, {"api_name": "discord.ext.commands", "line_number": 59, "usage_type": "name"}, {"api_name": "discord.guild_only", "line_number": 60, "usage_type": "call"}, {"api_name": "bot.constants.add_cog", "line_number": 89, "usage_type": "call"}, {"api_name": "bot.constants", "line_number": 89, "usage_type": "name"}]} +{"seq_id": "42998906900", "text": "import flask\nfrom flask import abort, render_template\nimport sys\n\nsys.path.insert(0, \"/var/www/flask/error_as_a_service\")\napp = flask.Flask(__name__)\nfrom error_as_a_service import app as application\n\n\n@app.route('/')\ndef hello_world():\n return render_template('index.html')\n\n@app.route('/')\ndef error_page(error_code):\n abort(int(error_code))\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n\n", "repo_name": "jeremiah1066/error_as_a_service", "sub_path": "error_as_a_service.py", "file_name": "error_as_a_service.py", "file_ext": "py", "file_size_in_byte": 417, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.path.insert", "line_number": 5, "usage_type": "call"}, {"api_name": "sys.path", "line_number": 5, "usage_type": "attribute"}, {"api_name": "flask.Flask", "line_number": 6, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 12, "usage_type": "call"}, {"api_name": "flask.abort", "line_number": 16, "usage_type": "call"}]} +{"seq_id": "1791119634", "text": "import logging\n\nimport gobject\nimport hippo\nimport gconf\n\nfrom sugar.graphics import style\nfrom sugar.graphics.icon import CanvasIcon\nfrom sugar.graphics.xocolor import XoColor\n\nfrom jarabe.view.buddymenu import BuddyMenu\nfrom jarabe.model.buddy import get_owner_instance\nfrom jarabe.model import friends\nfrom jarabe.desktop.friendview import FriendView\nfrom jarabe.desktop.spreadlayout import SpreadLayout\n\n\nclass GroupBox(hippo.Canvas):\n __gtype_name__ = 'SugarGroupBox'\n\n def __init__(self):\n logging.debug('STARTUP: Loading the group view')\n\n gobject.GObject.__init__(self)\n\n self._box = hippo.CanvasBox()\n self._box.props.background_color = style.COLOR_WHITE.get_int()\n self.set_root(self._box)\n\n self._friends = {}\n\n self._layout = SpreadLayout()\n self._box.set_layout(self._layout)\n\n client = gconf.client_get_default()\n color = XoColor(client.get_string('/desktop/sugar/user/color'))\n\n self._owner_icon = CanvasIcon(icon_name='computer-xo', cache=True,\n xo_color=color)\n self._owner_icon.props.size = style.LARGE_ICON_SIZE\n\n self._owner_icon.set_palette(BuddyMenu(get_owner_instance()))\n self._layout.add(self._owner_icon)\n\n friends_model = friends.get_model()\n\n for friend in friends_model:\n self.add_friend(friend)\n\n friends_model.connect('friend-added', self._friend_added_cb)\n friends_model.connect('friend-removed', self._friend_removed_cb)\n\n def add_friend(self, buddy_info):\n icon = FriendView(buddy_info)\n self._layout.add(icon)\n\n self._friends[buddy_info.get_key()] = icon\n\n def _friend_added_cb(self, data_model, buddy_info):\n self.add_friend(buddy_info)\n\n def _friend_removed_cb(self, data_model, key):\n icon = self._friends[key]\n self._layout.remove(icon)\n del self._friends[key]\n icon.destroy()\n\n def do_size_allocate(self, allocation):\n width = allocation.width\n height = allocation.height\n\n min_w_, icon_width = 
self._owner_icon.get_width_request()\n min_h_, icon_height = self._owner_icon.get_height_request(icon_width)\n x = (width - icon_width) / 2\n y = (height - icon_height) / 2\n self._layout.move(self._owner_icon, x, y)\n\n hippo.Canvas.do_size_allocate(self, allocation)\n", "repo_name": "nemesiscodex/JukyOS-sugar", "sub_path": "src/jarabe/desktop/groupbox.py", "file_name": "groupbox.py", "file_ext": "py", "file_size_in_byte": 2402, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "76", "api": [{"api_name": "hippo.Canvas", "line_number": 18, "usage_type": "attribute"}, {"api_name": "logging.debug", "line_number": 22, "usage_type": "call"}, {"api_name": "gobject.GObject.__init__", "line_number": 24, "usage_type": "call"}, {"api_name": "gobject.GObject", "line_number": 24, "usage_type": "attribute"}, {"api_name": "hippo.CanvasBox", "line_number": 26, "usage_type": "call"}, {"api_name": "sugar.graphics.style.COLOR_WHITE.get_int", "line_number": 27, "usage_type": "call"}, {"api_name": "sugar.graphics.style.COLOR_WHITE", "line_number": 27, "usage_type": "attribute"}, {"api_name": "sugar.graphics.style", "line_number": 27, "usage_type": "name"}, {"api_name": "jarabe.desktop.spreadlayout.SpreadLayout", "line_number": 32, "usage_type": "call"}, {"api_name": "gconf.client_get_default", "line_number": 35, "usage_type": "call"}, {"api_name": "sugar.graphics.xocolor.XoColor", "line_number": 36, "usage_type": "call"}, {"api_name": "sugar.graphics.icon.CanvasIcon", "line_number": 38, "usage_type": "call"}, {"api_name": "sugar.graphics.style.LARGE_ICON_SIZE", "line_number": 40, "usage_type": "attribute"}, {"api_name": "sugar.graphics.style", "line_number": 40, "usage_type": "name"}, {"api_name": "jarabe.view.buddymenu.BuddyMenu", "line_number": 42, "usage_type": "call"}, {"api_name": "jarabe.model.buddy.get_owner_instance", "line_number": 42, "usage_type": "call"}, {"api_name": "jarabe.model.friends.get_model", "line_number": 45, "usage_type": "call"}, {"api_name": "jarabe.model.friends", "line_number": 45, "usage_type": "name"}, {"api_name": "jarabe.desktop.friendview.FriendView", "line_number": 54, "usage_type": "call"}, {"api_name": "hippo.Canvas.do_size_allocate", "line_number": 78, "usage_type": "call"}, {"api_name": "hippo.Canvas", "line_number": 78, "usage_type": "attribute"}]} +{"seq_id": "11569923373", "text": "#import necessary packages\r\nfrom flask import Flask,render_template,request,redirect,url_for,send_from_directory\r\nimport os\r\nimport urllib.request\r\nfrom werkzeug.utils import secure_filename\r\nfrom object_detection import *\r\nfrom helmet_detection import *\r\nimport time\r\nfrom number_plate_detection import *\r\nfrom super_resolution import *\r\n#import pytesseract\r\n\r\nUPLOAD_FOLDER = 'static/uploads/'\r\nvideofile = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/\"\r\nFrames_Folder_path = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Frames_Folder\"\r\nOutput_Folder_path = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Output_Frames\"\r\nHelmet_Folder_path = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Helmet_Folder\"\r\n\r\nslash = \"/\"\r\nTotal_time_taken_by_object_detection = 0 \r\nTotal_time_taken_by_Helmet_detection = 0\r\nTotal_time_taken_by_NumberPlate_detection = 0\r\napp = Flask(__name__) \r\napp.secret_key = \"secret key\"\r\napp.config['UPLOAD_FOLDER'] = 
UPLOAD_FOLDER\r\napp.config['MAX_CONTENT_LENGTH'] = 30 * 1024 * 1024 \r\n\r\n\r\n\r\n@app.route(\"/\") \r\ndef main():\r\n    return render_template(\"upload.html\")\r\n\r\ndef get_image_paths(folder_path):\r\n    image_paths = []\r\n    for filename in os.listdir(folder_path):\r\n        if filename.endswith(\".jpg\") or filename.endswith(\".png\"):\r\n            image_paths.append(os.path.join(folder_path, filename))\r\n    return image_paths\r\n\r\n@app.route(\"/\",methods=['POST']) \r\ndef upload_video():\r\n    file = request.files['file']\r\n    if file.filename == \"\":\r\n        print(\"No image is selected\") \r\n        return redirect(request.url)\r\n    else:\r\n        filename = secure_filename(file.filename) \r\n        print(\"type of file -----> \",type(filename))\r\n        print(\"filename : \",filename)\r\n        global videofile\r\n        if videofile[-4:] != \".mp4\":\r\n            videofile += filename \r\n        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\r\n        print('Video successfully uploaded and displayed below')\r\n        return render_template('upload.html', filename=filename)\r\n\r\n@app.route('/display/<filename>')\r\ndef display_video(filename):\r\n    print('display_video filename: ' + filename)\r\n    return redirect(url_for('static', filename='uploads/' + filename), code=301)\r\n\r\n@app.route('/frames')\r\ndef generate(): \r\n    global videofile\r\n    videofile = os.path.join(app.config['UPLOAD_FOLDER'], videofile)\r\n    print(\"This is the video file path: \", videofile)\r\n    status = Capture_frames(videofile, Frames_Folder_path)\r\n    if status:\r\n        image_filenames = os.listdir(Frames_Folder_path)\r\n        return render_template(\"frames.html\", image_filenames=image_filenames)\r\n    else:\r\n        return \"Error capturing frames from the video.\"\r\n    #print(\"this is a video file ---> \",videofile)\r\n    #status = Capture_frames(videofile,Frames_Folder_path) \r\n    #print(Frames_Folder_path)\r\n    #return render_template(\"frames.html\",Frames_Folder_path=Frames_Folder_path)\r\n\r\n@app.route('/detectobjects')\r\ndef detect_object():\r\n    classes = object_classes()\r\n    i = 0\r\n    global Total_time_taken_by_object_detection\r\n    for files in os.listdir(Frames_Folder_path):\r\n\r\n        #to measure the detection time taken by the above function \r\n        start_time = time.time()\r\n        Predict_objects(Frames_Folder_path+slash+files,Output_Folder_path,files,Frames_Folder_path,classes)\r\n        end_time = time.time()\r\n        Total_time_taken_by_object_detection += end_time-start_time\r\n        i += 1\r\n        print(i)\r\n        if(i==len(os.listdir(Frames_Folder_path))):\r\n            break;\r\n    print(\"exited()\") \r\n    image_paths = Store_paths_inside_textfile()\r\n    print(\"--------------------------------------IMAGE PATHS-------------------------------\")\r\n    print(image_paths)\r\n    print(\"--------------------------------------------------------------------------------\")\r\n    #return render_template('display.html',image_names=image_paths)\r\n    # Get the image filenames from the output folder\r\n    image_filenames = get_image_paths(Output_Folder_path)\r\n    print(image_filenames)\r\n    #modify the structure of image file names\"\r\n    image_filenames2 = []\r\n    for p in image_filenames:\r\n        image_filenames2.append(p[88:]) \r\n    return render_template(\"display.html\", image_filenames=image_filenames2,message=Total_time_taken_by_object_detection)\r\n    \r\n    #return render_template(\"display.html\",Output_Folder_path=Output_Folder_path,message=Total_time_taken_by_object_detection)\r\n\r\n#@app.route('/')\r\n#def send_image(filename):\r\n#    print(filename,\"filename is ----------------------------------------------------\")\r\n#    return 
send_from_directory(directory=\"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/Output_Frames\",filename=filename[1:])\r\n\r\n@app.route('/detectobjects')\r\n#store the paths in a text file\r\n# writing to file\r\ndef Store_paths_inside_textfile():\r\n print(\"-----------------------\")\r\n print(\"The Frames which contain Two wheelers are : \",two_wheeler_frame_path)\r\n text_filepath = \"C:/Users/suman/PROJECTS/major_project/SIHProject/configuration_files/helmet_path_files\"\r\n file1 = open(text_filepath, 'w')\r\n file1.writelines(two_wheeler_frame_path)\r\n file1.close() \r\n # remove newlines \r\n image_paths = []\r\n for f in two_wheeler_frame_path:\r\n image_paths.append(f[69:-1])\r\n return image_paths\r\n\r\n@app.route('/helmetdetection') \r\ndef detect_helmet():\r\n global Total_time_taken_by_Helmet_detection\r\n #open file having paths of helmet frames\r\n with open(\"C:/Users/suman/PROJECTS/major_project/SIHProject/configuration_files/helmet_path_files\") as f:\r\n lines = [line.rstrip() for line in f]\r\n \r\n '''l[48:-4] gives name of file in path ex - o/p : Frame_43,Frame_44,Frame_46,Frame_51,Frame_52,Frame_53,Frame_82,Frame_84,Frame_87 ''' \r\n for filepath in lines:\r\n start_time1 = time.time()\r\n print(filepath[84:],\"it is filepath 84\")\r\n Predict_helmets(Helmet_Folder_path,filepath,filepath[84:])\r\n end_time1 = time.time()\r\n Total_time_taken_by_Helmet_detection += end_time1-start_time1\r\n Store_paths_inside_textfile2()\r\n image_filenames = get_image_paths(Helmet_Folder_path)\r\n #modify the structure of image file names\"\r\n image_filenames2 = []\r\n for p in image_filenames:\r\n image_filenames2.append(p[88:]) \r\n print(\"------------------------>\",image_filenames)\r\n return render_template(\"helmets.html\", image_filenames=image_filenames2,message=Total_time_taken_by_Helmet_detection) \r\n #return render_template(\"helmets.html\",Output_Folder_path=Helmet_Folder_path,message=Total_time_taken_by_Helmet_detection)\r\n\r\n@app.route('/helmetdetection') \r\ndef Store_paths_inside_textfile2():\r\n #print the paths and store them in a text file\r\n print(violation_paths)\r\n #store the paths in a text file\r\n # writing to file\r\n text_filepath2 = \"C:/Users/suman/PROJECTS/major_project/SIHProject/configuration_files/violation_path_files\"\r\n file2 = open(text_filepath2, 'w')\r\n file2.writelines(violation_paths)\r\n file2.close() \r\n\r\n\r\n@app.route('/numberplate')\r\ndef detect_numberplates():\r\n output_dir = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Lisence_Plates_Folder\"\r\n number_plate_detections = {}\r\n global Total_time_taken_by_NumberPlate_detection\r\n for files in os.listdir(output_dir):\r\n filepath = output_dir+slash+files\r\n try:\r\n start_time1 = time.time()\r\n left_x,top_y,width,height = Predict(output_dir,filepath,files[:3]+\"-output.jpg\")\r\n image = cv2.imread(output_dir+\"/\"+files[:3]+\"-output.jpg\")\r\n cropped_img = image[top_y:(top_y+height), left_x:(left_x+width)]\r\n cv2.imwrite(output_dir+\"/\"+files[:3]+\"-output_plate.jpg\",cropped_img)\r\n #extractedInfo = pytesseract.image_to_string(cropped_img)\r\n #print(extractedInfo)\r\n end_time1 = time.time()\r\n Total_time_taken_by_NumberPlate_detection += end_time1-start_time1\r\n number_plate_detections[files[:3]+\"-output.jpg\"] = [left_x,top_y,width,height]\r\n except TypeError:\r\n continue \r\n #fetch number plate results paths \r\n numberplate_image_paths = get_image_paths(output_dir) \r\n numberplate_image_paths2 = []\r\n for 
p in numberplate_image_paths:\r\n numberplate_image_paths2.append(p[96:]) \r\n #print(\"-->number plate path : \",numberplate_image_paths2[0])\r\n return render_template('numberplate.html',image_filenames=numberplate_image_paths2,Output_Folder_path=output_dir,message=Total_time_taken_by_NumberPlate_detection) \r\n\r\n@app.route('/superresolution')\r\ndef super_resolution():\r\n image_paths_2 = get_image_paths(\"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Lisence_Plates_Folder\") \r\n new_image_paths_2 = []\r\n for p in image_paths_2:\r\n print(p)\r\n if(p[-9::] == \"plate.jpg\"):\r\n new_image_paths_2.append(p) \r\n print(\"the number plates are : \",new_image_paths_2) \r\n for p in new_image_paths_2: \r\n superresolution(p)\r\n modified_path = \"C:/Users/suman/PROJECTS/major_project/SIHProject/Flask_API/static/uploads/Lisence_Plates\" \r\n image_filenames_path = get_image_paths(modified_path)\r\n #modify the structure of image file names\"\r\n image_filenames_path2 = []\r\n for p in image_filenames_path:\r\n image_filenames_path2.append(p[89:]) \r\n print(\"------------------------>\",image_filenames_path2)\r\n return render_template('superresolution.html',image_filenames=image_filenames_path2)\r\n\r\napp.static_folder = 'static'\r\nif __name__ == \"__main__\":\r\n app.run(debug=True) ", "repo_name": "BVRITHY/2019-Major-Projects", "sub_path": "SECTION-C/Team_C10/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 9506, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 3, "dataset": "github-code", "pt": "76", "api": [{"api_name": "flask.Flask", "line_number": 23, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 32, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 36, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 38, "usage_type": "call"}, {"api_name": "os.path", "line_number": 38, "usage_type": "attribute"}, {"api_name": "flask.request.files", "line_number": 43, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 43, "usage_type": "name"}, {"api_name": "flask.redirect", "line_number": 46, "usage_type": "call"}, {"api_name": "flask.request.url", "line_number": 46, "usage_type": "attribute"}, {"api_name": "flask.request", "line_number": 46, "usage_type": "name"}, {"api_name": "werkzeug.utils.secure_filename", "line_number": 48, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 54, "usage_type": "call"}, {"api_name": "os.path", "line_number": 54, "usage_type": "attribute"}, {"api_name": "flask.render_template", "line_number": 56, "usage_type": "call"}, {"api_name": "flask.redirect", "line_number": 61, "usage_type": "call"}, {"api_name": "flask.url_for", "line_number": 61, "usage_type": "call"}, {"api_name": "os.path.join", "line_number": 66, "usage_type": "call"}, {"api_name": "os.path", "line_number": 66, "usage_type": "attribute"}, {"api_name": "os.listdir", "line_number": 70, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 71, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 84, "usage_type": "call"}, {"api_name": "time.time", "line_number": 87, "usage_type": "call"}, {"api_name": "time.time", "line_number": 89, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 93, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 108, "usage_type": "call"}, {"api_name": "time.time", "line_number": 142, "usage_type": "call"}, {"api_name": "time.time", 
"line_number": 145, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 154, "usage_type": "call"}, {"api_name": "os.listdir", "line_number": 174, "usage_type": "call"}, {"api_name": "time.time", "line_number": 177, "usage_type": "call"}, {"api_name": "time.time", "line_number": 184, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 195, "usage_type": "call"}, {"api_name": "flask.render_template", "line_number": 215, "usage_type": "call"}]} +{"seq_id": "24878980677", "text": "import os\n\nimport pygame\nimport time\nimport random as r\nimport patterns\n\nfrom random import randrange, randint, uniform\nfrom pygame import display, draw\nfrom datetime import datetime\n\npatterns.init_patterns()\n\nscreen_size = (600, 600)\ngame = pygame.init()\nscreen = display.set_mode(screen_size)\nclock = pygame.time.Clock()\n\nwidth = 2.5\nheight = width\n\ndraw_chance = 10\ndraw_index = 0 # len(patterns.pattern_list) - 1\ndraw_modifier = 1\nFPS = 0\n\nin_game = True\ndraw_delay = False\nrandomize_color = False\nforce_random_color = False\nclear_screen = False\nrequire_left_mouse_held = True\nleft_mouse_held = False\nupdate_bg_color = False\nreset_bg_color = False\nmouse_visible = True\n\ncolor = (255, 0, 0)\nbg_color = (0, 0, 0)\nmouse_pos = (0, 0)\n# mouse_c = (-10, -10)\n\ntext_to_render = [\n {\n \"name\": \"draw_mod\",\n \"font\": pygame.font.SysFont(name='Times New Roman', size=15),\n \"clear_h\": 15,\n \"clear_w\": 125,\n \"color\": (0, 255, 0),\n \"draw_pos\": (screen_size[0] // 2 - 50, 10),\n \"text\": \"Draw multiplier: {0}\"\n }\n]\n\n\ndef draw_rect(x, y, width=1, height=1, color=(255, 0, 0)):\n draw.rect(screen, color, [x, y, width, height])\n\n\ndef random_color():\n return (randint(0, 255), randint(0, 255), randint(0, 255))\n\n\ndef apply_draw_mod():\n if draw_modifier <= 0:\n return 1\n return draw_modifier\n\n\ndef l_shift_pressed():\n return pygame.key.get_mods() & pygame.KMOD_LSHIFT\n\n\ndef l_ctrl_pressed():\n return pygame.key.get_mods() & pygame.KMOD_LCTRL\n\n\nif __name__ == '__main__':\n\n # game loop\n while in_game:\n if left_mouse_held and mouse_visible or not left_mouse_held and not mouse_visible:\n pygame.mouse.set_visible(False if mouse_visible else True)\n mouse_visible = not mouse_visible\n\n # Event handling\n for e in pygame.event.get():\n if e.type == pygame.QUIT:\n in_game = False\n\n # name press event handling\n if e.type == pygame.KEYDOWN:\n if e.key == pygame.K_ESCAPE:\n in_game = False\n elif e.key == pygame.K_r:\n reset_bg_color = True\n elif e.key == pygame.K_n:\n draw_index = draw_index + 1 if draw_index < len(patterns.pattern_list) - 1 else 0\n elif e.key == pygame.K_SPACE:\n color = random_color()\n elif e.key == pygame.K_c:\n force_random_color = not force_random_color\n elif e.key == pygame.K_x:\n color = random_color()\n elif e.key == pygame.K_f:\n update_bg_color = True\n elif e.key == pygame.K_MINUS and draw_modifier != 0:\n draw_modifier -= 1\n elif e.key == pygame.K_EQUALS:\n draw_modifier += 1\n elif e.key == pygame.K_LEFTBRACKET:\n if l_shift_pressed() and l_ctrl_pressed():\n draw_modifier -= 20\n elif l_shift_pressed():\n draw_modifier -= 10\n else:\n draw_modifier -= 5\n if draw_modifier < 0: draw_modifier = 0\n elif e.key == pygame.K_RIGHTBRACKET:\n if l_shift_pressed() and l_ctrl_pressed():\n draw_modifier += 20\n elif l_shift_pressed():\n draw_modifier += 10\n else:\n draw_modifier += 5\n\n if e.type == pygame.MOUSEMOTION:\n mouse_pos = e.pos\n\n if e.type == pygame.MOUSEBUTTONDOWN or e.type == 
pygame.MOUSEBUTTONUP:\r\n                if e.button == 1:\r\n                    left_mouse_held = not left_mouse_held\r\n\r\n        # Randomize Color\r\n        if force_random_color:\r\n            color = random_color()\r\n\r\n        # Rendering shapes\r\n        if require_left_mouse_held and left_mouse_held or not require_left_mouse_held:\r\n            p = patterns.get_pattern_at(draw_index)\r\n            s = p.size if draw_modifier != 0 else (1, 1)\r\n            s = s[0] * apply_draw_mod(), s[1] * apply_draw_mod()\r\n\r\n            for point in p.points:\r\n                draw_rect(mouse_pos[0] + point[0], mouse_pos[1] + point[1], s[0],\r\n                          s[1], color=color)\r\n\r\n        # Rendering text\r\n        for f in text_to_render:\r\n            draw_color = f['color']\r\n            render_text = f['text']\r\n            fill_x, fill_y = f['draw_pos'][0], f['draw_pos'][1] + 3\r\n\r\n            if 'random' in draw_color: draw_color = random_color()\r\n            if 'multiplier' in render_text.lower():\r\n                render_text = render_text.format(draw_modifier)\r\n\r\n            label = f['font'].render(render_text, 1, draw_color)\r\n            draw_rect(fill_x, fill_y, len(render_text) * 7, f['clear_h'], color=bg_color)\r\n            screen.blit(label, f['draw_pos'])\r\n\r\n        # Updating display\r\n        if update_bg_color:\r\n            update_bg_color = False\r\n            clear_screen = True\r\n            bg_color = random_color()\r\n\r\n        if reset_bg_color:\r\n            reset_bg_color = False\r\n            clear_screen = True\r\n            bg_color = (0, 0, 0)\r\n\r\n        if clear_screen:\r\n            screen.fill(bg_color)\r\n            clear_screen = False\r\n\r\n        if draw_delay and randint(1, draw_chance) == draw_chance:\r\n            pygame.display.update()\r\n        else:\r\n            display.update()\r\n\r\n        FPS = int(clock.get_fps())\r\n        display.set_caption(f'FPS: {FPS}')\r\n        clock.tick(60_0000)\r\n", "repo_name": "sharkbound/Python-Projects", "sub_path": "pygameapps/pygame_sketch/main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 5556, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 6, "dataset": "github-code", "pt": "76", "api": [{"api_name": "patterns.init_patterns", "line_number": 12, "usage_type": "call"}, {"api_name": "pygame.init", "line_number": 15, "usage_type": "call"}, {"api_name": "pygame.display.set_mode", "line_number": 16, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 16, "usage_type": "name"}, {"api_name": "pygame.time.Clock", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.time", "line_number": 17, "usage_type": "attribute"}, {"api_name": "pygame.font.SysFont", "line_number": 46, "usage_type": "call"}, {"api_name": "pygame.font", "line_number": 46, "usage_type": "attribute"}, {"api_name": "pygame.draw.rect", "line_number": 57, "usage_type": "call"}, {"api_name": "pygame.draw", "line_number": 57, "usage_type": "name"}, {"api_name": "random.randint", "line_number": 61, "usage_type": "call"}, {"api_name": "pygame.key.get_mods", "line_number": 71, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.KMOD_LSHIFT", "line_number": 71, "usage_type": "attribute"}, {"api_name": "pygame.key.get_mods", "line_number": 75, "usage_type": "call"}, {"api_name": "pygame.key", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.KMOD_LCTRL", "line_number": 75, "usage_type": "attribute"}, {"api_name": "pygame.mouse.set_visible", "line_number": 83, "usage_type": "call"}, {"api_name": "pygame.mouse", "line_number": 83, "usage_type": "attribute"}, {"api_name": "pygame.event.get", "line_number": 87, "usage_type": "call"}, {"api_name": "pygame.event", "line_number": 87, "usage_type": "attribute"}, {"api_name": "pygame.QUIT", "line_number": 88, "usage_type": "attribute"}, {"api_name": "pygame.KEYDOWN", "line_number": 92, "usage_type": "attribute"}, {"api_name": 
"pygame.K_ESCAPE", "line_number": 93, "usage_type": "attribute"}, {"api_name": "pygame.K_r", "line_number": 95, "usage_type": "attribute"}, {"api_name": "pygame.K_n", "line_number": 97, "usage_type": "attribute"}, {"api_name": "patterns.pattern_list", "line_number": 98, "usage_type": "attribute"}, {"api_name": "pygame.K_SPACE", "line_number": 99, "usage_type": "attribute"}, {"api_name": "pygame.K_c", "line_number": 101, "usage_type": "attribute"}, {"api_name": "pygame.K_x", "line_number": 103, "usage_type": "attribute"}, {"api_name": "pygame.K_f", "line_number": 105, "usage_type": "attribute"}, {"api_name": "pygame.K_MINUS", "line_number": 107, "usage_type": "attribute"}, {"api_name": "pygame.K_EQUALS", "line_number": 109, "usage_type": "attribute"}, {"api_name": "pygame.K_LEFTBRACKET", "line_number": 111, "usage_type": "attribute"}, {"api_name": "pygame.K_RIGHTBRACKET", "line_number": 119, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEMOTION", "line_number": 127, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONDOWN", "line_number": 130, "usage_type": "attribute"}, {"api_name": "pygame.MOUSEBUTTONUP", "line_number": 130, "usage_type": "attribute"}, {"api_name": "patterns.get_pattern_at", "line_number": 139, "usage_type": "call"}, {"api_name": "random.randint", "line_number": 176, "usage_type": "call"}, {"api_name": "pygame.display.update", "line_number": 177, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 177, "usage_type": "attribute"}, {"api_name": "pygame.display.update", "line_number": 179, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 179, "usage_type": "name"}, {"api_name": "pygame.display.set_caption", "line_number": 182, "usage_type": "call"}, {"api_name": "pygame.display", "line_number": 182, "usage_type": "name"}]} +{"seq_id": "9134671695", "text": "from functools import partial\nfrom tkinter import *\nfrom Parser import Parser\nimport shelve\nfrom tkinter.messagebox import showerror\n# globally declare the expression variable\nclass Calculator:\n\n def __init__(self):\n self.expression = \"\"\n self.window = Tk()\n self.parser = Parser()\n self.window.title(\"Calculator\")\n self.equation = StringVar()\n self.entry = Entry(self.window, textvariable=self.equation)\n self.entry.grid(row=1)\n buttonFrame = Frame(self.window)\n buttonFrame.grid(row=3)\n for i in range(9):\n Button(buttonFrame, command=partial(self.press, i + 1), text=str(i + 1), height=4, width=4) \\\n .grid(row=i // 3, column=i % 3)\n\n btnEquals = Button(buttonFrame, text='=', command=lambda: self.result(), height=4, width=4) \\\n .grid(row=4, column=0)\n btnZero = Button(buttonFrame, command=partial(self.press, 0), text=str(0), height=4, width=4) \\\n .grid(row=4, column=1)\n btnClean = Button(buttonFrame, command=lambda :self.clean(), text='C', height=4, width=4) \\\n .grid(row=4, column=2)\n btnLeftBracket = Button(buttonFrame, command=partial(self.press, ')'), text=')', height=4, width=4) \\\n .grid(row=0, column=4)\n btnRightBracket = Button(buttonFrame, command=partial(self.press, '('), text='(', height=4, width=4) \\\n .grid(row=1, column=4)\n btnPlus = Button(buttonFrame, command=partial(self.press, '+'), text='+', height=4, width=4) \\\n .grid(row=0, column=5)\n btnMinus = Button(buttonFrame, command=partial(self.press, '-'), text='-', height=4, width=4) \\\n .grid(row=1, column=5)\n # btnProd = Button(buttonFrame, command=partial(self.press, '*'), text='*', height=4, width=4) .grid(row=2, column=5)\n self.window.mainloop()\n\n\n def 
clean(self):\n self.expression = \"\"\n self.equation.set(self.expression)\n def press(self, num):\n self.expression = self.expression + str(num)\n self.equation.set(self.expression)\n\n\n def result(self):\n print(\"Result\")\n result = self.parser.parse(self.expression)\n self.equation.set(result)\n\n\ndef makeWidget():\n\n global equation\n\n\nif __name__ == '__main__':\n c = Calculator()\n", "repo_name": "DmitrySavkin/calculator", "sub_path": "main.py", "file_name": "main.py", "file_ext": "py", "file_size_in_byte": 2314, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1, "dataset": "github-code", "pt": "77", "api": [{"api_name": "Parser.Parser", "line_number": 12, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 20, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 25, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 29, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 31, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 33, "usage_type": "call"}, {"api_name": "functools.partial", "line_number": 35, "usage_type": "call"}]} +{"seq_id": "21483035978", "text": "#!/usr/bin/python3\n\nimport sys\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nuser_data_url = 'https://si3.ufc.br/public/restauranteConsultarSaldo.do'\nmenu_url = 'http://www.ufc.br/restaurante/cardapio/1-restaurante-universitario-de-fortaleza'\ndata = {\n 'codigoCartao': sys.argv[1],\n 'matriculaAtreladaCartao': sys.argv[2]\n}\n\ndef convert_to_dict(list_):\n \"\"\"\n Converts a list into a dict, eg:\n ['a', 1, 'b', 2] -> {'a': 1, 'b': 2}\n\n Args:\n list_: a list to be converted\n \"\"\"\n it = iter( list_ )\n \n return dict( zip(it,it) )\n\ndef print_user_credits(r):\n \"\"\"\n Extracts the username and the amount of credits in the card\n\n Args:\n r: a requests.models.Response\n \"\"\"\n \n raw_data = BeautifulSoup( r.text, 'html.parser' )\n raw_data.find( 'tbody' ).text\n\n user_info = raw_data.find( 'tbody' ).text.split('\\n')\n \n # Removing empty strings from the list\n user_info = [ no_empty_str for no_empty_str in user_info if no_empty_str ]\n\n info_dict = convert_to_dict( user_info )\n\n print( \"Usuário: {}\\nCréditos: {}\\n\".format(\n info_dict.get( 'Nome:','Indisponível' ), info_dict.get( 'Créditos:','Indisponível' ) ))\n\ndef print_menu(r):\n \"\"\"\n Get the menu of the day\n\n Args:\n r: a requests.models.Response\n \"\"\"\n\n raw_data = BeautifulSoup( r.text, 'html.parser' )\n almoco = raw_data.find( class_='refeicao almoco' ).find_all( class_='desc' )\n jantar = raw_data.find( class_='refeicao jantar' ).find_all( class_='desc' )\n\n print( 'Cardápio:' )\n print( ' Almoço: ')\n for item in almoco:\n print( ' ', item.text )\n \n print( ' Jantar' )\n for item in jantar:\n print( ' ', item.text )\n\ntry:\n r = requests.post( user_data_url, data=data )\n print_user_credits( r )\n\nexcept requests.exceptions.ConnectionError as e:\n print( e )\n print( 'Erro ao tentar obter a quantidade de créditos do usuário.' )\n\ntry:\n r = requests.get( menu_url )\n print_menu( r )\n\nexcept requests.exceptions.ConnectionError:\n print( 'Erro ao tentar obter o cardápio do dia.' 
) \n", "repo_name": "alvesRenan/extratoRU", "sub_path": "extrato_ru.py", "file_name": "extrato_ru.py", "file_ext": "py", "file_size_in_byte": 2024, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "sys.argv", "line_number": 11, "usage_type": "attribute"}, {"api_name": "sys.argv", "line_number": 12, "usage_type": "attribute"}, {"api_name": "bs4.BeautifulSoup", "line_number": 35, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 56, "usage_type": "call"}, {"api_name": "requests.post", "line_number": 70, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 73, "usage_type": "attribute"}, {"api_name": "requests.get", "line_number": 78, "usage_type": "call"}, {"api_name": "requests.exceptions", "line_number": 81, "usage_type": "attribute"}]} +{"seq_id": "29440572386", "text": "import pymysql\n\nhost = \"127.0.0.1\"\nport = 3306\nusername = \"root\"\npassword = \"root\"\ndatabase = \"crew1\"\n\n\nclass ConnMysql(object):\n\n def __init__(self):\n self.conn = pymysql.Connect(host=host, port=port, user=username, password=password, database=database)\n self.cursor = self.conn.cursor()\n\n def exAndCom(self, sql):\n self.cursor.execute(sql)\n self.conn.commit()\n\n def innsert(self, **kwargs):\n id_ = kwargs.get(\"id\")\n name = kwargs.get(\"name\")\n detail = kwargs.get(\"detail\")\n path = kwargs.get(\"path\")\n sql = str.format(\"insert into info1(id, name, detail, path) value ({0}, {1}, {2}, {3})\", id_, name, detail, path)\n self.exAndCom(sql)\n\n def delete(self, id_):\n sql = str.format(\"delete from info1 where id = {0}\", id_)\n self.exAndCom(sql)\n\n def update(self, **kwargs):\n sql = \"update info1 set \"\n if kwargs.get(\"id\") is None:\n return\n i = 0\n for k, v in kwargs.items():\n i += 1\n if k != \"id\":\n if i == len(kwargs):\n sql = str.format(\"{0}{1}={2}\", sql, k, v)\n else:\n sql = str.format(\"{0}{1}={2},\", sql, k, v)\n sql += str.format(\" where id={0}\", kwargs.get(\"id\"))\n self.exAndCom(sql)\n\n def query(self, **kwargs):\n sql = \"select * from info1\"\n if len(kwargs) > 0:\n flag = True\n sql += \" where\"\n for k, v in kwargs.items():\n if flag:\n flag = False\n sql = str.format(\" {0} {1}={2}\", sql, k, v)\n else:\n sql = str.format(\"{0} and {1} = {2}\", sql, k, v)\n self.cursor.execute(sql)\n return self.cursor\n\n\ndef main():\n map = {\n \"id\": 1,\n \"name\": 2,\n \"detail\": 23,\n \"path\": 234\n }\n mapU = {\n \"id\": 1,\n \"name\": 222,\n \"detail\": 222,\n \"path\": 222\n }\n conn = ConnMysql()\n conn.innsert(**map)\n conn.update(**mapU)\n print(\"--------add after----------\")\n for c in conn.query():\n print(c)\n conn.delete(1)\n print(\"--------delete after-------\")\n for c in conn.query():\n print(c)\n\n\nif __name__ == '__main__':\n main()\n\n", "repo_name": "zhangwei19910109/zwpython", "sub_path": "connMysql.py", "file_name": "connMysql.py", "file_ext": "py", "file_size_in_byte": 2299, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "pymysql.Connect", "line_number": 13, "usage_type": "call"}]} +{"seq_id": "74938272244", "text": "from struct import unpack\nfrom zlib import decompress\nimport sys\nimport re\nimport hashlib\nimport bs4\nfrom dataclasses import dataclass\nfrom torch.utils.data import Dataset\nfrom transformers import PreTrainedTokenizer\nimport logging\nimport os\nimport torch\nimport pickle\nfrom typing import Optional, List, 
Tuple\n\nlogger = logging.getLogger(__name__)\n\n\n# Helpers for beautiful soup\ndef find_at_most_one(bs, *args, **kwargs):\n t = bs.find_all(*args, **kwargs)\n if not t:\n return t\n elif len(t) > 1:\n raise InvalidParseAssumptionError(\"Too many found!\")\n else:\n return t[0]\n\n\ndef find_exactly_one(bs, *args, **kwargs):\n t = bs.find_all(*args, **kwargs)\n if not t:\n raise InvalidParseAssumptionError(\"Not enough tags found!\")\n elif len(t) > 1:\n raise InvalidParseAssumptionError(\"Too many found!\")\n else:\n return t[0]\n\n\ndef find_at_least_one(bs, *args, **kwargs):\n t = bs.find_all(*args, **kwargs)\n if not t:\n raise InvalidParseAssumptionError(\"Not enough tags found!\")\n return t\n\n\nclass InvalidParseAssumptionError(RuntimeError):\n pass\n\n\n@dataclass\nclass Pronounciation:\n text: str\n type: str\n\n\n@dataclass\nclass Definition:\n pos_modifier: Optional[str]\n definition: str\n examples: List[str]\n topic: Optional[str]\n dates: List[str]\n\n\n@dataclass\nclass ReferenceDefinition:\n pos_modifier: Optional[str]\n reference: str\n\n\n@dataclass\nclass Sense:\n pos: Optional[str]\n definitions: List[Definition]\n\n\n@dataclass\nclass Entry:\n word: str\n variant: Optional[int]\n senses: List[Sense]\n pronounciations: List[Pronounciation]\n phrases: List[Definition]\n phrasal_verbs: List[Tuple[str, List[Definition]]]\n origin: Optional[str]\n derivatives: List[str]\n notes: List[str]\n\n\n@dataclass\nclass DictionaryDefinition:\n title: str\n entry_str: str\n parsed_entry: Optional[bs4.BeautifulSoup] = None\n\n @classmethod\n def gen_from_apple_dictionary(cls, f):\n f.seek(0x40)\n limit = 0x40 + unpack(\"i\", f.read(4))[0]\n f.seek(0x60)\n while f.tell() < limit:\n (sz,) = unpack(\"i\", f.read(4))\n buf = decompress(f.read(sz)[8:])\n for m in re.finditer(b\" 1:\n logging.warning(f\"Too many topics found: {topic_spans}, picking first one\")\n\n local_pos_modifier_span = entry_span.find(\"span\", class_=\"gg\", recursive=False)\n local_pos_modifier = local_pos_modifier_span and local_pos_modifier_span.get_text().strip()\n\n definition = definition_span.get_text().strip()\n examples = [e.get_text().strip().strip(\":\").strip() for e in example_spans]\n topic = topic_spans and topic_spans[0].get_text().strip()\n\n date_spans = definition_span(\"span\", class_=\"dg\")\n dates = [e.get_text().strip() for e in date_spans]\n\n definitions.append(\n Definition(\n pos_modifier=local_pos_modifier or global_pos_modifier,\n definition=definition,\n examples=examples,\n topic=topic,\n dates=dates,\n )\n )\n elif xrg_spans:\n for xrg in xrg_spans:\n referenced_terms = find_at_least_one(xrg, \"span\", class_=\"xr\")\n\n for referenced_term in referenced_terms:\n reference = referenced_term.get_text().strip()\n definitions.append(ReferenceDefinition(pos_modifier=global_pos_modifier, reference=reference,))\n elif entry_span.find(\"span\", class_=\"ex\"):\n logger.warning(f\"Silently ignoring example without corresponding definition {entry_span}\")\n else:\n raise InvalidParseAssumptionError(f\"Weird span: {entry_span}\")\n\n return definitions\n\n @classmethod\n def parse_sense(cls, parsed_entry):\n pos_spans = parsed_entry(\"span\", class_=\"tg_pos\")\n if len(pos_spans) > 1:\n pos = \" \".join([e.get_text().strip() for e in pos_spans])\n elif not pos_spans:\n pos_span = find_at_most_one(parsed_entry, \"span\", class_=\"posg\")\n pos = pos_span.get_text().strip() if pos_span else None\n else:\n pos = pos_spans[0].get_text().strip()\n\n if parsed_entry.findChildren(\"span\", 
class_=\"se2\"):\n sense_definitions = []\n for c in parsed_entry.children:\n if set(c[\"class\"]) & set((\"tg_pos\", \"posg\", \"x_xdh\")):\n continue\n elif not c.get_text().strip():\n continue\n elif \"se2\" in c[\"class\"]:\n sense_definitions.extend(cls.parse_sense_definitions(c))\n elif \"note\" in c[\"class\"]:\n logger.warning(f\"Dropping note in word sense {c}\")\n elif \"msDict\" in c[\"class\"]:\n logger.warning(f\"Dropping unexpected msDict in sense {c}\")\n else:\n raise InvalidParseAssumptionError(f\"WEIRD TAG: {c}\")\n else:\n sense_definitions = cls.parse_sense_definitions(parsed_entry)\n\n if not sense_definitions:\n raise InvalidParseAssumptionError(\"No sense definitions!\")\n return Sense(pos=pos, definitions=sense_definitions)\n\n @classmethod\n def parse_derivatives(cls, parsed_entry):\n words = find_at_least_one(parsed_entry, \"span\", class_=\"l\")\n return [e.get_text().strip() for e in words]\n\n @classmethod\n def parse_origin(cls, parsed_entry):\n etym_type = find_exactly_one(parsed_entry, \"span\", class_=\"tg_etym\", recursive=False)\n if etym_type.get_text().strip() != \"ORIGIN\":\n raise InvalidParseAssumptionError(f\"Unexpected etym type: {etym_type}\")\n\n origin_span = find_exactly_one(parsed_entry, \"span\", class_=\"x_xo1\")\n origin = origin_span.get_text().strip()\n return origin\n\n @classmethod\n def parse_phrasal_verbs(cls, parsed_entry):\n subentries = find_at_least_one(parsed_entry, \"span\", class_=\"subEntry\")\n ret = []\n for subentry in subentries:\n word_span = subentry.find(\"span\", class_=\"x_xoh\") or subentry.find(\"span\", class_=\"x_xot\")\n if subentry.find(\"span\", class_=\"msDict\"):\n definitions = cls.parse_sense_definitions(subentry)\n else:\n definitions = []\n ret.append((word_span.get_text().strip(), definitions))\n return ret\n\n @classmethod\n def parse(cls, parsed_entry):\n entry = find_exactly_one(parsed_entry, \"d:entry\")\n head_entry = find_exactly_one(entry, \"span\", class_=\"hg\")\n defn_entry = find_exactly_one(entry, \"span\", class_=\"sg\")\n\n head_word_span = find_exactly_one(head_entry, \"span\", class_=\"hw\")\n word = \" \".join([t.strip() for t in head_word_span.contents if type(t) == bs4.element.NavigableString]).strip()\n\n variant_span = find_at_most_one(head_word_span, \"span\", class_=\"tg_hw\")\n variant = int(variant_span.get_text()) if variant_span else None\n\n pronounciations = cls.parse_pronounciations(head_entry)\n\n senses = defn_entry(\"span\", class_=\"se1\")\n if len(senses) == 0:\n raise InvalidParseAssumptionError(f\"No senses found!\")\n\n senses = []\n for c in defn_entry.children:\n if \"se1\" in c[\"class\"]:\n senses.append(cls.parse_sense(c))\n elif c.get_text().strip():\n raise InvalidParseAssumptionError(f\"Weird tag found in definition: {c.prettify()}!\")\n\n phrases = []\n origin = None\n subentries = entry.find_all(\"span\", class_=\"t_derivatives\") # derivatives # TODO: verify\n derivatives = []\n phrasal_verbs = []\n notes = []\n\n for subentry in entry.children:\n if subentry == head_entry or subentry == defn_entry:\n continue\n elif \"t_phrases\" in subentry[\"class\"]:\n phrases = cls.parse_sense_definitions(subentry)\n elif \"t_derivatives\" in subentry[\"class\"]:\n derivatives = cls.parse_derivatives(subentry)\n elif \"t_phrasalVerbs\" in subentry[\"class\"]:\n phrasal_verbs = cls.parse_phrasal_verbs(subentry)\n elif \"etym\" in subentry[\"class\"]:\n origin = cls.parse_origin(subentry)\n elif \"note\" in subentry[\"class\"]:\n notes.append(subentry.get_text())\n 
else:\n                raise InvalidParseAssumptionError(f\"Weird entry found: {subentry}\")\n\n        # TODO: determine other direct children types\n        return Entry(\n            word=word,\n            variant=variant,\n            pronounciations=pronounciations,\n            senses=senses,\n            phrases=phrases,\n            phrasal_verbs=phrasal_verbs,\n            origin=origin,\n            derivatives=derivatives,\n            notes=notes,\n        )\n\n\ndef generate_words(\n    tokenizer,\n    model,\n    allow_proper_nouns=True,\n    blacklist=(),\n    prefix=\"\",\n    num=100,\n    batch_size=50,\n    max_length=400,\n    max_iterations=20,\n):\n    ret = []\n    num_iteration = 0\n\n    input = tokenizer.encode(prefix, return_tensors=\"pt\").to(\"cuda\")\n\n    while len(ret) < num and num_iteration < max_iterations:\n        num_iteration += 1\n\n        generated = model.generate(input, max_length=max_length, num_return_sequences=batch_size, temperature=1.0,)\n        valid_i = 0\n\n        for i in range(generated.size()[0]):\n            sentence_tokens = generated[i, :].tolist()\n            decoded = tokenizer.decode(sentence_tokens)\n            m = re.search(r\"<title>(.*?)</title>(.*)\", decoded)\n            if m:\n                title = m.group(1).strip()\n                if not allow_proper_nouns and title[:1].upper() == title[:1]:\n                    continue\n                elif title.upper() in blacklist or title.upper().rstrip(\"s\") in blacklist:\n                    continue\n                else:\n                    ret.append(DictionaryDefinition(title=title, entry_str=m.group(2).rstrip(\"!\")))\n            else:\n                logger.warning(f'Unable to match regex in \"{decoded}\"')\n\n    return ret\n", "repo_name": "turtlesoupy/this-word-does-not-exist", "sub_path": "title_maker_pro/dictionary_definition.py", "file_name": "dictionary_definition.py", "file_ext": "py", "file_size_in_byte": 13104, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 1010, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 16, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 51, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 59, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 61, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 62, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 63, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 57, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 68, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 66, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 74, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 75, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 72, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 81, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 82, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 83, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 84, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Tuple", "line_number": 85, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 86, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 87, "usage_type": "name"}, {"api_name": "typing.List", "line_number": 88, "usage_type": "name"}, {"api_name": "dataclasses.dataclass", "line_number": 78, "usage_type": "name"}, {"api_name": "typing.Optional", "line_number": 95, "usage_type": "name"}, {"api_name": "bs4.BeautifulSoup", "line_number": 95, "usage_type": "attribute"}, {"api_name": "struct.unpack", "line_number": 100, "usage_type": 
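# Illustrative sketch, not part of the dumped repos: gen_from_apple_dictionary above walks the
# dictionary body as a sequence of length-prefixed zlib blocks (a 4-byte size, then a chunk
# whose first 8 bytes are skipped before zlib.decompress). This shows that framing in
# isolation with a made-up payload; the 0x40/0x60 header offsets are specific to the real
# file and omitted here.
import io
import zlib
from struct import pack, unpack

def write_block(buf, payload: bytes):
    body = b"\x00" * 8 + zlib.compress(payload)  # 8 filler bytes, then the zlib stream
    buf.write(pack("i", len(body)))              # 4-byte size prefix
    buf.write(body)

def read_blocks(f):
    while True:
        header = f.read(4)
        if len(header) < 4:
            return
        (sz,) = unpack("i", header)
        yield zlib.decompress(f.read(sz)[8:])    # skip the 8-byte block header

buf = io.BytesIO()
write_block(buf, b"<d:entry>hello</d:entry>")
buf.seek(0)
assert list(read_blocks(buf)) == [b"<d:entry>hello</d:entry>"]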
"call"}, {"api_name": "struct.unpack", "line_number": 103, "usage_type": "call"}, {"api_name": "zlib.decompress", "line_number": 104, "usage_type": "call"}, {"api_name": "re.finditer", "line_number": 105, "usage_type": "call"}, {"api_name": "re.search", "line_number": 107, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 108, "usage_type": "call"}, {"api_name": "bs4.BeautifulSoup", "line_number": 109, "usage_type": "call"}, {"api_name": "dataclasses.dataclass", "line_number": 91, "usage_type": "name"}, {"api_name": "logging.warning", "line_number": 164, "usage_type": "call"}, {"api_name": "bs4.element", "line_number": 267, "usage_type": "attribute"}, {"api_name": "re.search", "line_number": 347, "usage_type": "call"}]} +{"seq_id": "25859418570", "text": "from zope.component import adapts\nfrom zope.interface import implements\n\nfrom themecontainer import FSThemeContainer\nfrom Products.GenericSetup.utils import XMLAdapterBase\nfrom Products.GenericSetup.utils import PropertyManagerHelpers\nfrom Products.GenericSetup.utils import exportObjects\nfrom Products.GenericSetup.utils import importObjects\n\nfrom Products.GenericSetup.interfaces import IBody\nfrom Products.GenericSetup.interfaces import ISetupEnviron\nfrom interfaces import IThemeContainer\n_marker = object()\n\nNAME = 'designer_themes'\n\nROOT_THEMES = '.cps_themes'\n\ndef importRootThemesContainer(context):\n \"\"\"Create the root themes container.\n \"\"\"\n logger = context.getLogger(NAME)\n logger.info(\"Creating the root themes container\")\n\n site = context.getSite()\n thc = getattr(site, ROOT_THEMES, None)\n if thc is None:\n thc = FSThemeContainer(ROOT_THEMES)\n thc.manage_changeProperties(title='Root themes')\n site._setObject(ROOT_THEMES, thc)\n thc = getattr(site, ROOT_THEMES)\n importObjects(thc, '', context)\n\n\ndef exportRootThemesContainer(context):\n \"\"\"Export the root themes container\n \"\"\"\n site = context.getSite()\n thc = getattr(site, ROOT_THEMES, None)\n if thc is None:\n logger = context.getLogger(NAME)\n logger.info(\"Nothing to export.\")\n return\n exportObjects(thc, '', context)\n\nclass ThemeContainerXMLAdapter(XMLAdapterBase, PropertyManagerHelpers):\n \"\"\"XML importer and exporter for theme containers\n \"\"\"\n\n adapts(IThemeContainer, ISetupEnviron)\n implements(IBody)\n\n _LOGGER_ID = NAME\n name = NAME\n\n def _exportNode(self):\n \"\"\"Export the object as a DOM node.\n \"\"\"\n node = self._getObjectNode('object')\n node.appendChild(self._extractProperties())\n self._logger.info(\"Root themes container exported.\")\n return node\n\n def _importNode(self, node):\n \"\"\"Import the object from the DOM node.\n \"\"\"\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info(\"Root theme container imported.\")\n\n", "repo_name": "nuxeo-cps/products--CPSDesignerThemes", "sub_path": "exportimport.py", "file_name": "exportimport.py", "file_ext": "py", "file_size_in_byte": 2144, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "themecontainer.FSThemeContainer", "line_number": 28, "usage_type": "call"}, {"api_name": "Products.GenericSetup.utils.importObjects", "line_number": 32, "usage_type": "call"}, {"api_name": "Products.GenericSetup.utils.exportObjects", "line_number": 44, "usage_type": "call"}, {"api_name": "Products.GenericSetup.utils.XMLAdapterBase", "line_number": 46, "usage_type": "name"}, {"api_name": 
"Products.GenericSetup.utils.PropertyManagerHelpers", "line_number": 46, "usage_type": "name"}, {"api_name": "zope.component.adapts", "line_number": 50, "usage_type": "call"}, {"api_name": "interfaces.IThemeContainer", "line_number": 50, "usage_type": "argument"}, {"api_name": "Products.GenericSetup.interfaces.ISetupEnviron", "line_number": 50, "usage_type": "argument"}, {"api_name": "zope.interface.implements", "line_number": 51, "usage_type": "call"}, {"api_name": "Products.GenericSetup.interfaces.IBody", "line_number": 51, "usage_type": "argument"}]} +{"seq_id": "6686290102", "text": "import gmpy2 as gp\r\n\"\"\"\r\nRSA ALGORITHM IMPLEMENTATION\r\nThe steps in an RSA Algorithm are\r\n1. Choose two prime numbers p, q.\r\n2. Let n = p * q\r\n3. Let φ = (p − 1)(q − 1)\r\n4. Choose a large number e ∈ [2, φ − 1]that is coprime to φ.\r\n5. Compute d ∈ [2, φ − 1] such that e × d = 1 (mod φ), and d must be coprime to φ\r\n6. (e, n) is the public key\r\n7. (d, n) is the private key\r\n8. Encryption: C = m^e(mod n)\r\n9. Decryption: m = C^d(mod n)\r\n\"\"\"\r\n\r\ndef encrypt(p, q, m):\r\n\tn = gp.mul(p, q)\r\n\tphi = gp.mul(gp.sub(p, 1), gp.sub(q, 1))\r\n\tif 65537 < phi and gp.gcd(65537, phi) == 1:\r\n\t\te = 65537\r\n\telif 257 < phi and gp.gcd(257, phi) == 1:\r\n\t\te = 257\r\n\telif 17 < phi and gp.gcd(17, phi) == 1:\r\n\t\te = 17\r\n\telif 5 < phi and gp.gcd(5, phi) == 1:\r\n\t\te = 5\r\n\telif 3 < phi and gp.gcd(3, phi) == 1:\r\n\t\te = 3\r\n\telse:\r\n\t\te = gp.sub(phi, 1)\r\n\td = gp.divm(1, e, phi) # returns d such that e * d = 1 mod phi; since 1 mod phi, d is coprime to n\r\n\tc = gp.powmod(m, e, n)\r\n\tprint('c: ', c)\r\n\tprint('e: ', e)\t\r\n\tprint('d: ', d)\r\n\tprint('n: ', n)\r\n\treturn c, d, n\r\n\r\ndef decrypt(c, d, n):\r\n\tm = gp.powmod(c, d, n)\r\n\tprint('m: ', m)\r\n\r\nprint('Enter p, q, m\\nConstraints:\\n* p self.last_update_id:\n self.last_update_id = delta.update_id\n for bid in delta.bids: self.update_bid(bid)\n for ask in delta.asks: self.update_ask(ask)\n\n def __unicode__(self):\n return \"DepthCache: %s\" % self.last_update_id\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n if six.PY3:\n return self.__unicode__()\n else:\n return unicode(self).encode('utf-8')\n\nclass Trade(ApiModel):\n def __init__(self, id=None, **kwargs):\n self.id = id\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n def __unicode__(self):\n return \"Trade: %s\" % self.id\n\nclass AggregateTrade(ApiModel):\n def __init__(self, id=None, **kwargs):\n self.id = id\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_aggregate_trade = AggregateTrade(id=entry[\"a\"])\n\n new_aggregate_trade.price = entry[\"p\"]\n new_aggregate_trade.qty = entry[\"q\"]\n new_aggregate_trade.first_trade_id = entry[\"f\"]\n new_aggregate_trade.last_trade_id = entry[\"l\"]\n new_aggregate_trade.timestamp = entry[\"T\"]\n new_aggregate_trade.is_maker = entry[\"m\"]\n new_aggregate_trade.is_best_match = entry[\"M\"]\n\n return new_aggregate_trade\n\n def __unicode__(self):\n return \"AggregateTrade: %s\" % self.id\n\nclass Candlestick(ApiModel):\n def __init__(self, open_time, close_time, **kwargs):\n self.open_time = open_time\n self.close_time = close_time\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_candlestick = Candlestick(open_time=entry[0], close_time=entry[6])\n \n new_candlestick.open = 
entry[1]\n new_candlestick.high = entry[2]\n new_candlestick.low = entry[3]\n new_candlestick.close = entry[4]\n new_candlestick.volume = entry[5]\n new_candlestick.quote_asset_volume = entry[7]\n new_candlestick.number_of_trades = entry[8]\n new_candlestick.base_asset_volume = entry[9]\n new_candlestick.quote_asset_volume = entry[10]\n\n return new_candlestick\n\n def __unicode__(self):\n return \"Candlestick: %s-%s\" % (self.open_time, self.close_time)\n\nclass Statistics(ApiModel):\n def __init__(self, first_id, last_id, **kwargs):\n self.first_id = first_id\n self.last_id = last_id\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_statistics = Statistics(entry[\"firstId\"], entry[\"lastId\"])\n\n for key, value in six.iteritems(entry):\n setattr(new_statistics, camel_to_underline(str(key)), value)\n\n return new_statistics\n\n def __unicode__(self):\n return \"Statistics: %s-%s\" % (self.first_id, self.last_id)\n\nclass Price(ApiModel):\n def __init__(self, price, **kwargs):\n self.price = price\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_price = Price(entry[\"price\"])\n new_price.symbol = entry[\"symbol\"]\n\n return new_price\n\n def __unicode__(self):\n return \"Price: %s(%s)\" % (self.symbol, self.price)\n\nclass Ticker(ApiModel):\n def __init__(self, symbol, **kwargs):\n self.symbol = symbol\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_ticker = Ticker(entry[\"symbol\"])\n new_ticker.bid = Bid(entry[\"bidPrice\"], entry[\"bidQty\"])\n new_ticker.ask = Ask(entry[\"askPrice\"], entry[\"askQty\"])\n\n return new_ticker\n\n def __unicode__(self):\n return \"Ticker: %s\" % self.symbol\n\nclass Order(ApiModel):\n def __init__(self, id, **kwargs):\n self.id = id\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n \n @classmethod\n def object_from_dictionary(cls, entry):\n new_order = Order(entry[\"orderId\"])\n \n for key, value in six.iteritems(entry):\n setattr(new_order, camel_to_underline(str(key)), value)\n\n return new_order\n\n def __unicode__(self):\n return \"Order: %s\" % self.id\n\nclass Balance(ApiModel):\n def __init__(self, asset, free, locked, **kwargs):\n self.asset = asset\n self.free = free\n self.locked = locked\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n def __unicode__(self):\n return \"Balance: %s\" % self.asset\n\nclass Account(ApiModel):\n def __init__(self, *args, **kwargs):\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n def balances_dict(self):\n return dict([(balance[\"asset\"], balance) for balance in self.balances])\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_account = Account()\n \n for key, value in six.iteritems(entry):\n setattr(new_account, camel_to_underline(str(key)), value)\n\n new_account.balances = []\n new_account.balances_dict = {}\n for balance in entry[\"balances\"]:\n new_account.balances.append(Balance.object_from_dictionary(balance))\n\n return new_account\n\n def __unicode__(self):\n return \"Account\"\n\nclass Deposit(ApiModel):\n def __init__(self, *args, **kwargs):\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n deposit_list = []\n for deposit in entry[\"depositList\"]:\n new_deposit = 
Deposit()\n            new_deposit.insert_time = deposit[\"insertTime\"]\n            new_deposit.amount = deposit[\"amount\"]\n            new_deposit.asset = deposit[\"asset\"]\n            new_deposit.status = deposit[\"status\"]\n            deposit_list.append(new_deposit)\n        return deposit_list\n\n    def __unicode__(self):\n        return \"Deposit\"\n\nclass Withdraw(ApiModel):\n    def __init__(self, *args, **kwargs):\n        for key, value in six.iteritems(kwargs):\n            setattr(self, key, value)\n\n    @classmethod\n    def object_from_dictionary(cls, entry):\n        withdraw_list = []\n        for withdraw in entry[\"withdrawList\"]:\n            new_withdraw = Withdraw()\n            new_withdraw.amount = withdraw[\"amount\"]\n            new_withdraw.address = withdraw[\"address\"]\n            new_withdraw.asset = withdraw[\"asset\"]\n            new_withdraw.apply_time = withdraw[\"applyTime\"]\n            new_withdraw.status = withdraw[\"status\"]\n            withdraw_list.append(new_withdraw)\n        return withdraw_list\n\n    def __unicode__(self):\n        return \"Withdraw\"\n\nclass DepthUpdateEvent(ApiModel):\n    EVENT_TYPE = \"depthUpdate\" \n\n    def __init__(self, update_id, **kwargs):\n        self.update_id = update_id\n        for key, value in six.iteritems(kwargs):\n            setattr(self, key, value)\n\n    @classmethod\n    def object_from_dictionary(cls, entry):\n        new_depth_delta = DepthUpdateEvent(entry[\"u\"])\n\n        new_depth_delta.event_type = entry[\"e\"]\n        new_depth_delta.event_time = entry[\"E\"]\n        new_depth_delta.symbol = entry[\"s\"]\n        new_depth_delta.bids = []\n        new_depth_delta.asks = []\n\n        for bid in entry[\"b\"]:\n            new_depth_delta.bids.append(Bid.object_from_dictionary(bid))\n\n        for ask in entry[\"a\"]:\n            new_depth_delta.asks.append(Ask.object_from_dictionary(ask))\n\n        return new_depth_delta\n\n    def __unicode__(self):\n        return \"DepthUpdateEvent: %s\" % self.update_id\n\nclass KLineEvent(ApiModel):\n    EVENT_TYPE = \"kline\" \n\n    def __init__(self, start_time, end_time, **kwargs):\n        self.start_time = start_time\n        self.end_time = end_time\n        for key, value in six.iteritems(kwargs):\n            setattr(self, key, value)\n\n    @classmethod\n    def object_from_dictionary(cls, entry):\n        new_kline = KLineEvent(entry[\"k\"][\"t\"], entry[\"k\"][\"T\"])\n\n        new_kline.event_type = entry[\"e\"]\n        new_kline.event_time = entry[\"E\"]\n        entry = entry[\"k\"]\n        new_kline.symbol = entry[\"s\"]\n        new_kline.interval = entry[\"i\"]\n        new_kline.first_trade_id = entry[\"f\"]\n        new_kline.last_trade_id = entry[\"L\"]\n        new_kline.open = entry[\"o\"]\n        new_kline.close = entry[\"c\"]\n        new_kline.high = entry[\"h\"]\n        new_kline.low = entry[\"l\"]\n        new_kline.volume = entry[\"v\"]\n        new_kline.number_of_trades = entry[\"n\"]\n        new_kline.is_final = entry[\"x\"]\n        new_kline.quote_volume = entry[\"q\"]\n        new_kline.active_buy_volume = entry[\"V\"]\n        new_kline.active_buy_quote_volume = entry[\"Q\"]\n\n        return new_kline\n\n    def __unicode__(self):\n        return \"KLineEvent: %s-%s\" % (self.start_time, self.end_time)\n\nclass AggregateTradeEvent(ApiModel):\n    EVENT_TYPE = \"aggTrade\" \n\n    def __init__(self, event_time, **kwargs):\n        self.event_time = event_time\n        for key, value in six.iteritems(kwargs):\n            setattr(self, key, value)\n\n    @classmethod\n    def object_from_dictionary(cls, entry):\n        new_aggregate_trade = AggregateTradeEvent(event_time=entry[\"E\"])\n\n        new_aggregate_trade.event_type = entry[\"e\"]\n        new_aggregate_trade.symbol = entry[\"s\"]\n        new_aggregate_trade.price = entry[\"p\"]\n        new_aggregate_trade.qty = entry[\"q\"]\n        new_aggregate_trade.first_breakdown_trade_id = entry[\"f\"]\n        new_aggregate_trade.last_breakdown_trade_id = entry[\"l\"]\n        new_aggregate_trade.trade_time = entry[\"T\"]\n        
new_aggregate_trade.is_maker = entry[\"m\"]\n\n return new_aggregate_trade\n\n def __unicode__(self):\n return \"AggregateTradeEvent: %s\" % self.event_time\n\nclass OutBoundAccountInfoEvent(ApiModel):\n EVENT_TYPE = \"outboundAccountInfo\" \n\n def __init__(self, event_time, **kwargs):\n self.event_time = event_time\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_event = OutBoundAccountInfoEvent(event_time=entry[\"E\"])\n\n new_event.event_type = entry[\"e\"]\n new_event.balances = []\n for balance in entry[\"B\"]:\n new_event.balances.append(Balance(balance[\"a\"], balance[\"f\"], balance[\"l\"]))\n\n return new_event\n\n def __unicode__(self):\n return \"OutBoundAccountInfoEvent: %s\" % self.event_time\n\nclass ExecutionReportEvent(ApiModel):\n EVENT_TYPE = \"executionReport\" \n\n def __init__(self, event_time, **kwargs):\n self.event_time = event_time\n for key, value in six.iteritems(kwargs):\n setattr(self, key, value)\n\n @classmethod\n def object_from_dictionary(cls, entry):\n new_event = ExecutionReportEvent(event_time=entry[\"E\"])\n\n new_event.event_type = entry[\"e\"]\n new_event.symbol = entry[\"s\"]\n new_event.new_client_order_id = entry[\"c\"]\n new_event.side = entry[\"S\"]\n new_event.order_type = entry[\"o\"]\n new_event.time_in_force = entry[\"f\"]\n new_event.original_quantity = entry[\"q\"]\n new_event.price = entry[\"p\"]\n new_event.execution_type = entry[\"x\"]\n new_event.order_status = entry[\"X\"]\n new_event.order_reject_reason = entry[\"r\"]\n new_event.order_id = entry[\"i\"]\n new_event.last_filled_trade_quantity = entry[\"l\"]\n new_event.filled_trade_accumulated_quantity = entry[\"z\"]\n new_event.last_filled_trade_price = entry[\"L\"]\n new_event.commission = entry[\"n\"]\n new_event.commission_asset = entry[\"N\"]\n new_event.order_time = entry[\"T\"]\n new_event.trade_time = entry[\"T\"]\n new_event.trade_id = entry[\"t\"]\n new_event.is_maker = entry[\"m\"]\n\n return new_event\n\n def __unicode__(self):\n return \"ExecutionReportEvent: %s\" % self.event_time \n\nclass UserDataEvent(object):\n\n @classmethod\n def object_from_dictionary(cls, entry):\n if entry[\"e\"] == OutBoundAccountInfoEvent.EVENT_TYPE:\n return OutBoundAccountInfoEvent.object_from_dictionary(entry)\n elif entry[\"e\"] == ExecutionReportEvent.EVENT_TYPE:\n return ExecutionReportEvent.object_from_dictionary(entry)\n", "repo_name": "cnfuyu/python-binance-api", "sub_path": "binance/models.py", "file_name": "models.py", "file_ext": "py", "file_size_in_byte": 16609, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 31, "dataset": "github-code", "pt": "76", "api": [{"api_name": "six.PY3", "line_number": 29, "usage_type": "attribute"}, {"api_name": "six.iteritems", "line_number": 36, "usage_type": "call"}, {"api_name": "six.PY3", "line_number": 157, "usage_type": "attribute"}, {"api_name": "six.iteritems", "line_number": 165, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 174, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 198, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 224, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 231, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 242, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 258, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 275, "usage_type": "call"}, {"api_name": "six.iteritems", 
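# Illustrative sketch, not part of the dumped repos: the object_from_dictionary methods above
# map camelCase API keys onto snake_case attributes via camel_to_underline, which this record
# uses but does not define. A plausible regex-based implementation (an assumption, not
# necessarily the package's actual helper):
import re

def camel_to_underline(name: str) -> str:
    # insert "_" before each upper-case letter that follows a lower-case letter
    # or digit, then lower-case everything: "orderId" -> "order_id"
    return re.sub(r"(?<=[a-z0-9])([A-Z])", r"_\1", name).lower()

assert camel_to_underline("orderId") == "order_id"
assert camel_to_underline("firstId") == "first_id"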
"line_number": 282, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 295, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 303, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 313, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 328, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 348, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 372, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 402, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 437, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 463, "usage_type": "call"}, {"api_name": "six.iteritems", "line_number": 485, "usage_type": "call"}]} +{"seq_id": "29444696298", "text": "# -*- coding: utf-8 -*-\n###############################################################################\n# License, author and contributors information in: #\n# __manifest__.py file at the root folder of this module. #\n###############################################################################\n\nimport json\nimport logging\nimport requests\nimport werkzeug\nfrom werkzeug import urls\n\nfrom odoo import http, fields\nfrom odoo.http import request\nfrom odoo.service import common\n\n_logger = logging.getLogger(__name__)\n\nversion_info = common.exp_version()\nserver_serie = version_info.get('server_serie')\n\n\nclass SSLCommerzController(http.Controller):\n _success_url = '/payment/sslcommerz/success/'\n _fail_url = '/payment/sslcommerz/fail/'\n _cancel_url = '/payment/sslcommerz/cancel/'\n _ipn_url = '/payment/sslcommerz/ipn/'\n\n def _get_return_url(self, **post):\n \"\"\" Extract the return URL from the data coming from sslcommerz. \"\"\"\n return_url = post.pop('return_url', '')\n\n if not return_url:\n return_url = '/shop/payment/validate'\n\n return return_url\n\n # Newly added\n @http.route([\n '/payment/sslcommerz/return/',\n '/payment/sslcommerz/cancel/',\n ], type='http', auth='public', csrf=False)\n def sslcommerz_form_feedback(self, **post):\n if post:\n request.env['payment.transaction'].sudo(\n ).form_feedback(post, 'sslcommerz')\n base_url = request.env['ir.config_parameter'].sudo(\n ).get_param('web.base.url')\n return request.render('bose_ssl_commerz.payment_sslcommerz_redirect', {\n 'return_url': urls.url_join(base_url, \"/payment/process\")\n })\n\n def sslcommerz_validate_data(self, **post):\n \"\"\"\n SSLCOMMERZ Receive Payment Notification (IPN): Validate Payment with IPN\n\n As IPN URL already set in panel. All the payment notification will reach through IPN prior to user return back. So it needs validation for amount and transaction properly.\n\n The IPN will send a POST REQUEST with below parameters. 
Grab the post notification with your desired platform ( PHP: $_POST)\n Once data is validated, process it.\n\n Doc: 'https://developer.sslcommerz.com/doc/v4/'\n \"\"\"\n _logger.info(\n '**************** sslcommerz_validate_data *******************')\n res = False\n model_name = post.get('value_a') \n rec_id = post.get('value_b') \n reference = post.get('value_c') \n val_id = post.get('val_id')\n tx = None\n\n if reference:\n domain = [('reference', 'like', reference)]\n if model_name == 'sale.order':\n domain = [('sale_order_ids', '=', int(rec_id))]\n\n tx = request.env['payment.transaction'].sudo().search(\n domain, order='id desc', limit=1)\n\n if 'payment.transaction' in post.get('value_d'):\n tx_id = post.get('value_d').split(\"&\")[0].replace(\"payment.transaction(\",\"\").replace(\")\", \"\").replace(\",\",\"\")\n domain = [('id', '=', int(tx_id))]\n tx = request.env['payment.transaction'].sudo().search(\n domain, order='id desc', limit=1)\n\n if not tx:\n # we have seemingly received a notification for a payment that did not come from\n # odoo, acknowledge it otherwise sslcommerz will keep trying\n _logger.warning(\n 'received notification for unknown payment reference')\n return False\n\n sslcommerz_urls = request.env['payment.acquirer']._get_sslcommerz_urls(\n tx and tx.acquirer_id and tx.acquirer_id.state or 'prod')\n validate_url = sslcommerz_urls['sslcommerz_validation_url']\n acquirer_id = request.env['payment.acquirer'].search(\n [('provider', '=', 'sslcommerz')])\n store_id = acquirer_id.sslcommerz_store_id\n store_passwd = acquirer_id.sslcommerz_store_passwd\n new_post = dict(store_id=store_id,\n store_passwd=store_passwd, \n val_id=val_id)\n urequest_json = self._sslcommerz_validate_url(validate_url, new_post)\n _logger.info(\"\\nValidation Response:\\n\" + str(urequest_json))\n\n res = ''\n txn_key = post.get(\"val_id\")\n \n print(\"IPN Status: \" + str(post.get('status')))\n print(\"Order Validation Status: \" + str(urequest_json.get('status')))\n\n\n if post.get('status') == \"VALID\" and (urequest_json.get(\"status\") == 'VALID' or urequest_json.get(\"status\") == 'VALIDATED'):\n res = \"status=approved&transactionKey={}\".format(txn_key)\n\n date_validate = fields.datetime.now()\n if post.get(\"date_stamp\"):\n date_validate = post.get('date_stamp', fields.datetime.now())\n\n if tx:\n session = {}\n tx_data = tx.get_tx_values({}, post)\n tx_data.update({\n \"state\": \"done\",\n \"acquirer_reference\": post.get(\"val_id\"),\n \"type\": \"validation\",\n \"date\": date_validate,\n })\n tx.sudo().write(tx_data)\n tx._post_process_after_done()\n\n if tx.sale_order_ids:\n print(\"tx.sale_order_ids\")\n if float(tx.sale_order_ids[0].amount_total) == float(post.get('currency_amount')):\n if tx.sale_order_ids[0].state != 'sale':\n tx.sale_order_ids[0].sudo().action_confirm()\n res += \"&transaction_id=%s&sale_order_id=%s\" %(tx.id, tx.sale_order_ids[0].id)\n\n if tx.invoice_ids:\n print(\" tx.invoice_ids\")\n if float(tx.invoice_ids[0].amount_total) == float(post.get('currency_amount')):\n self.invoice_reg_payment(tx, post)\n value_id = post.get(\"value_d\")\n if len(value_id.split(\"&\")) > 1:\n session = value_id.split(\"&\")[1].replace(\"session=\",\"\").replace(\"'\",'\"')\n try:\n session = json.loads(session)\n except Exception as e:\n session = json.dumps(session)\n session = json.loads(session)\n new_session = dict(request.session).copy()\n if session:\n if request.session.get('uid') == None:\n request.session['uid'] = session.get('uid')\n if 
request.session.get('login') == None:\n request.session['login'] = session.get('login')\n if request.session.get('session_token') == None:\n request.session['session_token'] = session.get('session_token')\n res += \"&transaction_id=%s&invoice_id=%s\" %(tx.id, tx.invoice_ids[0].id)\n\n return res\n\n\n def _sslcommerz_validate_url(self, validate_url, new_post):\n \"\"\"\n Order Validation API\n Request Parameters\n PARAM NAME\t DATA TYPE\tDESCRIPTION\n val_id\t string (50)\tMandatory - A Validation ID against the successful transaction which is provided by SSLCOMMERZ.\n store_id\t string (30)\tMandatory - Your SSLCOMMERZ Store ID is the integration credential which can be collected through our managers\n store_passwd\tstring (30)\tMandatory - Your SSLCOMMERZ Store Password is the integration credential which can be collected through our managers\n format\t string (10)\tPredefined value is json or xml. This parameter is used to get the response in two different format such as json or xml. By default it returns json format.\n v\t integer (1)\tOpen for future use only.\n Returned Parameters\n PARAM NAME\t DATA TYPE\t DESCRIPTION\n status\t string (20)\t Transaction Status. This parameter needs to be checked before update your database as a successful transaction.\n VALID : A successful transaction.\n VALIDATED : A successful transaction but called by your end more than one.\n INVALID_TRANSACTION : Invalid validation id (val_id).\n ....\n ....\n risk_level\tinteger (1)\tTransaction's Risk Level - High (1) for most risky transactions and Low (0) for safe transactions. Please hold the service and proceed to collect customer verification documents\n status\tstring (20)\tOpen for future use only.\n risk_title\tstring (50)\tTransaction's Risk Level Description\n \"\"\"\n urequest = requests.get(validate_url, new_post)\n resp = urequest.text\n urequest_json = json.loads(resp)\n return urequest_json\n\n def invoice_reg_payment(self, tx, post):\n \"\"\"invoice_reg_payment\"\"\"\n if tx.invoice_ids[0].state != 'posted':\n tx.sale_order_ids[0].sudo().action_post()\n # Register Payment\n if tx.invoice_ids[0].amount_residual > 0 and float(tx.invoice_ids[0].amount_residual) == float(post.get('currency_amount')):\n invoice = tx.invoice_ids[0]\n AccountPayment = request.env['account.payment']\n APMethod = request.env['account.payment.method']\n payment_method_id = APMethod.sudo().search([\n ('name','=','Electronic'),\n ('code','=','electronic'),\n ('payment_type','=','inbound')\n ])\n\n payment_vals = {\n 'amount':tx.amount,\n 'currency_id':invoice.currency_id.id,\n 'journal_id':tx.acquirer_id.journal_id.id,\n 'date':fields.Datetime.now().date().strftime(\"%Y-%m-%d\"),\n 'payment_method_id':payment_method_id.id,\n 'partner_type':'customer',\n 'partner_id':invoice.partner_id.id,\n 'payment_type':'inbound',\n 'invoice_origin':tx.reference,\n 'payment_transaction_id':tx.id,\n # 'payment_token_id':tx.reference,\n 'ref':tx.reference,\n 'company_id': tx.acquirer_id.company_id.id,\n }\n payment = AccountPayment.sudo().create(payment_vals)\n invoice.sudo().write({'payment_id' : payment.id})\n\n print(\"payment_vals\")\n print(payment_vals)\n print(payment)\n print(\"-----------------------------------------\")\n\n\n\n @http.route('/payment/sslcommerz/ipn/', type='http', auth='none', methods=['POST'], csrf=False)\n def sslcommerz_ipn(self, **post):\n \"\"\" sslcommerz ipn \"\"\"\n _logger.info('****************************** /IPN')\n res = self.sslcommerz_validate_data(**post)\n return 
werkzeug.utils.redirect('/sslcommerz?{}'.format(res))\n\n @http.route('/payment/sslcommerz/success', type='http', auth=\"none\", methods=['POST'], csrf=False)\n def sslcommerz_success(self, **post):\n \"\"\" sslcommerz Success \"\"\"\n _logger.info('****************************** /success')\n return_url = self._get_return_url(**post)\n res = self.sslcommerz_validate_data(**post)\n\n\n if res:\n if 'sale_order_id' in res:\n for part in res.split(\"&\"):\n if part.split(\"=\")[0] == 'sale_order_id':\n if part.split(\"=\")[1] != False:\n if part.split(\"=\")[1] != request.session.get('sale_last_order_id'):\n request.session.update({'sale_last_order_id' : int(part.split(\"=\")[1]) })\n if 'invoice_id' in res:\n for part in res.split(\"&\"):\n if part.split(\"=\")[0] == 'invoice_id':\n if part.split(\"=\")[1] != False:\n invoice_id = part.split(\"=\")[1]\n invoice = request.env['account.move'].sudo().browse(int(invoice_id))\n return_url = '/my/invoices/' + invoice_id + '?access_token=%s' %invoice.access_token\n\n # print(\"res---->\\n\", res)\n # print(\"return_url---->\\n\", return_url)\n # print(\"&&&&&&&&&&&&&&&&&&&&&&&&&&&&\")\n # print(\"session\", request.session)\n\n return werkzeug.utils.redirect(return_url + '?{}'.format(res))\n else:\n return werkzeug.utils.redirect(self._cancel_url)\n\n\n @http.route('/payment/sslcommerz/cancel', type='http', auth=\"none\", methods=['POST'], csrf=False)\n def sslcommerz_cancel(self, **post):\n \"\"\" When the user cancels its sslcommerz payment: GET on this route \"\"\"\n _logger.info('****************************** /cancel')\n reference = post.get('val_id')\n if reference:\n sales_order_obj = request.env['sale.order']\n so_ids = sales_order_obj.sudo().search([('name', '=', reference)])\n if so_ids:\n '''return_url = '/shop/payment/get_status/' + str(so_ids[0])'''\n so = sales_order_obj.browse(so_ids[0].id)\n\n msg = \"/sslcommerz?status=cancelled&\"\n for key, value in post.items():\n msg += str(key)\n msg += '='\n msg += str(value)\n msg += '&'\n return werkzeug.utils.redirect(msg)\n\n # @http.route('/sslcommerz', type='http', auth='public', methods=['GET'], website=True)\n # def sslcommerz_status(self, **get):\n # _logger.info('****************************** /SSLCOMMERZ')\n # status = ''\n # transactionKey = ''\n # response_code = ''\n # message = ''\n # infoemail = ''\n # if 'status' in get:\n # status = get['status']\n # if 'transactionKey' in get:\n # transactionKey = get['transactionKey']\n # if 'response_code' in get:\n # response_code = get['response_code']\n # if 'message' in get:\n # message = get['message']\n\n # return request.render('bose_ssl_commerz.sslcommerz_status', {'status': status, 'transactionKey': transactionKey, 'response_code': response_code, 'message': message})\n", "repo_name": "real-ariful/hfmfurniture", "sub_path": "bose_ssl_commerz/controllers/ssl_commerz_controllers.py", "file_name": "ssl_commerz_controllers.py", "file_ext": "py", "file_size_in_byte": 14546, "program_lang": "python", "lang": "en", "doc_type": "code", "stars": 0, "dataset": "github-code", "pt": "76", "api": [{"api_name": "logging.getLogger", "line_number": 17, "usage_type": "call"}, {"api_name": "odoo.service.common.exp_version", "line_number": 19, "usage_type": "call"}, {"api_name": "odoo.service.common", "line_number": 19, "usage_type": "name"}, {"api_name": "odoo.http.Controller", "line_number": 23, "usage_type": "attribute"}, {"api_name": "odoo.http", "line_number": 23, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 45, 
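# Illustrative sketch, not part of the dumped repos: stripped of the Odoo plumbing, the
# validation round-trip performed by _sslcommerz_validate_url above is a single GET plus a
# status check. The URL below is the documented sandbox validator endpoint; the credentials
# and val_id are placeholders.
import requests

VALIDATE_URL = "https://sandbox.sslcommerz.com/validator/api/validationserverAPI.php"

def is_payment_valid(val_id: str, store_id: str, store_passwd: str) -> bool:
    resp = requests.get(VALIDATE_URL, params={
        "val_id": val_id,
        "store_id": store_id,
        "store_passwd": store_passwd,
        "format": "json",
    })
    # per the docstring above, VALID and VALIDATED both mean a good payment
    return resp.json().get("status") in ("VALID", "VALIDATED")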
"usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 45, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 47, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 47, "usage_type": "name"}, {"api_name": "odoo.http.request.render", "line_number": 49, "usage_type": "call"}, {"api_name": "odoo.http.request", "line_number": 49, "usage_type": "name"}, {"api_name": "werkzeug.urls.url_join", "line_number": 50, "usage_type": "call"}, {"api_name": "werkzeug.urls", "line_number": 50, "usage_type": "name"}, {"api_name": "odoo.http.route", "line_number": 39, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 39, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 78, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 78, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 84, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 84, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 94, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 94, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 97, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 97, "usage_type": "name"}, {"api_name": "odoo.fields.datetime.now", "line_number": 117, "usage_type": "call"}, {"api_name": "odoo.fields.datetime", "line_number": 117, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 117, "usage_type": "name"}, {"api_name": "odoo.fields.datetime.now", "line_number": 119, "usage_type": "call"}, {"api_name": "odoo.fields.datetime", "line_number": 119, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 119, "usage_type": "name"}, {"api_name": "json.loads", "line_number": 148, "usage_type": "call"}, {"api_name": "json.dumps", "line_number": 150, "usage_type": "call"}, {"api_name": "json.loads", "line_number": 151, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 152, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 152, "usage_type": "name"}, {"api_name": "odoo.http.request.session.get", "line_number": 154, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 154, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 154, "usage_type": "name"}, {"api_name": "odoo.http.request.session", "line_number": 155, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 155, "usage_type": "name"}, {"api_name": "odoo.http.request.session.get", "line_number": 156, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 156, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 156, "usage_type": "name"}, {"api_name": "odoo.http.request.session", "line_number": 157, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 157, "usage_type": "name"}, {"api_name": "odoo.http.request.session.get", "line_number": 158, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 158, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 158, "usage_type": "name"}, {"api_name": "odoo.http.request.session", "line_number": 159, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 159, "usage_type": "name"}, {"api_name": "requests.get", "line_number": 187, "usage_type": 
"call"}, {"api_name": "json.loads", "line_number": 189, "usage_type": "call"}, {"api_name": "odoo.http.request.env", "line_number": 199, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 199, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 200, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 200, "usage_type": "name"}, {"api_name": "odoo.fields.Datetime.now", "line_number": 211, "usage_type": "call"}, {"api_name": "odoo.fields.Datetime", "line_number": 211, "usage_type": "attribute"}, {"api_name": "odoo.fields", "line_number": 211, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 237, "usage_type": "call"}, {"api_name": "werkzeug.utils", "line_number": 237, "usage_type": "attribute"}, {"api_name": "odoo.http.route", "line_number": 232, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 232, "usage_type": "name"}, {"api_name": "odoo.http.request.session.get", "line_number": 252, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 252, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 252, "usage_type": "name"}, {"api_name": "odoo.http.request.session.update", "line_number": 253, "usage_type": "call"}, {"api_name": "odoo.http.request.session", "line_number": 253, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 253, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 259, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 259, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 267, "usage_type": "call"}, {"api_name": "werkzeug.utils", "line_number": 267, "usage_type": "attribute"}, {"api_name": "werkzeug.utils.redirect", "line_number": 269, "usage_type": "call"}, {"api_name": "werkzeug.utils", "line_number": 269, "usage_type": "attribute"}, {"api_name": "odoo.http.route", "line_number": 239, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 239, "usage_type": "name"}, {"api_name": "odoo.http.request.env", "line_number": 278, "usage_type": "attribute"}, {"api_name": "odoo.http.request", "line_number": 278, "usage_type": "name"}, {"api_name": "werkzeug.utils.redirect", "line_number": 290, "usage_type": "call"}, {"api_name": "werkzeug.utils", "line_number": 290, "usage_type": "attribute"}, {"api_name": "odoo.http.route", "line_number": 272, "usage_type": "call"}, {"api_name": "odoo.http", "line_number": 272, "usage_type": "name"}]} +{"seq_id": "14084282438", "text": "import pytz\nfrom dateutil import parser\nfrom bs4 import BeautifulSoup\nimport utils\nimport config\nimport regex\n\n\ndef format_item(item, feedinfo, channelid):\n category = config.get_feed_category(channelid, feedinfo)\n format = config.get_format_by_category(channelid, category)\n\n title = utils.get_entry(item, 'title')\n pubdate = format_date(utils.get_entry(item, 'published'))\n author = utils.get_entry(item, 'author')\n link = f'[Link]({utils.get_entry(item, \"link\")})'\n author = utils.get_entry(item, 'author')\n hashtags = '\\\\%23\\\\'.join(feedinfo['tags'])\n name = feedinfo['name']\n\n desc = format_desc(utils.get_entry(item, 'description'))\n\n posttext = utils.format_str(format, name=name, author=author, title=title,\n body=desc, url=link, hashtags=hashtags, pubdate=pubdate)\n return posttext\n\n\ndef format_date(date: str):\n parsed = parser.parse(date)\n gmt = str(pytz.timezone('GMT').normalize(parsed))\n return 
gmt.replace('+00:00', ' GMT')\n\n\ndef format_desc(description: str):\n    soup = BeautifulSoup(description, 'html.parser')\n\n    # edit the parse tree in place so the final get_text() call reflects the\n    # markdown substitutions\n    for a in soup.find_all('a'):\n        ancl = a.get('href') or ''\n        if regex.search(r'twitter\\.com/.*/status/', ancl):\n            a.replace_with('[quoted tweet link](%s)' % ancl)\n        else:\n            a.replace_with('[link](%s)' % ancl)\n\n    for br in soup.find_all('br'):\n        br.replace_with('\\n')\n\n    for img in soup.find_all('img'):\n        img.replace_with('[image](%s)' % img.get('src'))\n\n    for vid in soup.find_all('video'):\n        vid.replace_with('[video](%s)' % vid.get('src'))\n\n    for b in soup.find_all(['b', 'strong']):\n        b.replace_with('*%s*' % b.get_text())\n\n    for i in soup.find_all(['i', 'em']):\n        i.replace_with('_%s_' % i.get_text())\n\n    for h in soup.find_all(['h1', 'h2', 'h3', 'h4', 'h5', 'h6']):\n        h.replace_with('▍*%s*\\n' % h.get_text())\n\n    return regex.sub(r'([_*\\[\\]()~`>+\\-=|{}.!])', r'\\\\\\1', soup.get_text('\\n').replace('#', '\\\\%23\\\\'))\n\n\n# def format_description(description: str):\n#     # replace img tags with markdown link\n#     for img in regex.findall('', description):\n#         imgl = regex.search(\n#             '(http|ftp|https):\\/\\/([\\w_-]+(?:(?:\\.[\\w_-]+)+))([\\w.,@?^=%&:\\/~+#-]*[\\w@?^=%&\\/~+#-])', img).group()\n#         description = description.replace(img, f'[image]({imgl})')\n\n#     # replace video tags with markdown link\n#     for vid in regex.findall('